Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 16
-rw-r--r--  drivers/Makefile | 12
-rw-r--r--  drivers/accel/Kconfig | 3
-rw-r--r--  drivers/accel/Makefile | 3
-rw-r--r--  drivers/accel/amdxdna/Kconfig | 18
-rw-r--r--  drivers/accel/amdxdna/Makefile | 25
-rw-r--r--  drivers/accel/amdxdna/TODO | 1
-rw-r--r--  drivers/accel/amdxdna/aie2_ctx.c | 1079
-rw-r--r--  drivers/accel/amdxdna/aie2_error.c | 419
-rw-r--r--  drivers/accel/amdxdna/aie2_message.c | 1074
-rw-r--r--  drivers/accel/amdxdna/aie2_msg_priv.h | 448
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c | 1187
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.h | 346
-rw-r--r--  drivers/accel/amdxdna/aie2_pm.c | 108
-rw-r--r--  drivers/accel/amdxdna/aie2_psp.c | 146
-rw-r--r--  drivers/accel/amdxdna/aie2_smu.c | 177
-rw-r--r--  drivers/accel/amdxdna/aie2_solver.c | 380
-rw-r--r--  drivers/accel/amdxdna/aie2_solver.h | 155
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.c | 572
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.h | 194
-rw-r--r--  drivers/accel/amdxdna/amdxdna_error.h | 59
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c | 972
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.h | 90
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.c | 575
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.h | 124
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.c | 61
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.h | 44
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.c | 364
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.h | 149
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pm.c | 94
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pm.h | 18
-rw-r--r--  drivers/accel/amdxdna/amdxdna_sysfs.c | 67
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ubuf.c | 232
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ubuf.h | 19
-rw-r--r--  drivers/accel/amdxdna/npu1_regs.c | 122
-rw-r--r--  drivers/accel/amdxdna/npu2_regs.c | 115
-rw-r--r--  drivers/accel/amdxdna/npu4_regs.c | 146
-rw-r--r--  drivers/accel/amdxdna/npu5_regs.c | 115
-rw-r--r--  drivers/accel/amdxdna/npu6_regs.c | 116
-rw-r--r--  drivers/accel/drm_accel.c | 126
-rw-r--r--  drivers/accel/ethosu/Kconfig | 11
-rw-r--r--  drivers/accel/ethosu/Makefile | 4
-rw-r--r--  drivers/accel/ethosu/ethosu_device.h | 197
-rw-r--r--  drivers/accel/ethosu/ethosu_drv.c | 403
-rw-r--r--  drivers/accel/ethosu/ethosu_drv.h | 15
-rw-r--r--  drivers/accel/ethosu/ethosu_gem.c | 704
-rw-r--r--  drivers/accel/ethosu/ethosu_gem.h | 46
-rw-r--r--  drivers/accel/ethosu/ethosu_job.c | 497
-rw-r--r--  drivers/accel/ethosu/ethosu_job.h | 40
-rw-r--r--  drivers/accel/habanalabs/Kconfig | 25
-rw-r--r--  drivers/accel/habanalabs/common/Makefile | 5
-rw-r--r--  drivers/accel/habanalabs/common/command_submission.c | 15
-rw-r--r--  drivers/accel/habanalabs/common/context.c | 3
-rw-r--r--  drivers/accel/habanalabs/common/debugfs.c | 348
-rw-r--r--  drivers/accel/habanalabs/common/device.c | 248
-rw-r--r--  drivers/accel/habanalabs/common/firmware_if.c | 229
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs.h | 130
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_drv.c | 11
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_ioctl.c | 19
-rw-r--r--  drivers/accel/habanalabs/common/hldio.c | 437
-rw-r--r--  drivers/accel/habanalabs/common/hldio.h | 146
-rw-r--r--  drivers/accel/habanalabs/common/hwmon.c | 60
-rw-r--r--  drivers/accel/habanalabs/common/irq.c | 33
-rw-r--r--  drivers/accel/habanalabs/common/memory.c | 34
-rw-r--r--  drivers/accel/habanalabs/common/memory_mgr.c | 42
-rw-r--r--  drivers/accel/habanalabs/common/mmu/mmu.c | 14
-rw-r--r--  drivers/accel/habanalabs/common/pci/pci.c | 4
-rw-r--r--  drivers/accel/habanalabs/common/sysfs.c | 25
-rw-r--r--  drivers/accel/habanalabs/gaudi/gaudi.c | 31
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2.c | 476
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2P.h | 17
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c | 2
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2_security.c | 1
-rw-r--r--  drivers/accel/habanalabs/goya/goya.c | 13
-rw-r--r--  drivers/accel/habanalabs/include/gaudi2/gaudi2.h | 4
-rw-r--r--  drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h | 244
-rw-r--r--  drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h | 27
-rw-r--r--  drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h | 8
-rw-r--r--  drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h | 3
-rw-r--r--  drivers/accel/ivpu/Kconfig | 10
-rw-r--r--  drivers/accel/ivpu/Makefile | 18
-rw-r--r--  drivers/accel/ivpu/ivpu_coredump.c | 39
-rw-r--r--  drivers/accel/ivpu/ivpu_coredump.h | 25
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 201
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 198
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 112
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 307
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.h | 28
-rw-r--r--  drivers/accel/ivpu/ivpu_fw_log.c | 113
-rw-r--r--  drivers/accel/ivpu/ivpu_fw_log.h | 17
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c | 256
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 32
-rw-r--r--  drivers/accel/ivpu/ivpu_gem_userptr.c | 213
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.c | 420
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h | 207
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c | 1065
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx_reg.h | 72
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 1250
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx_reg.h | 96
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs.c | 905
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs.h | 50
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h | 112
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs_mtl_reg.h | 83
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_ip.c | 1199
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_ip.h | 36
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_reg_io.h | 64
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c | 66
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.h | 17
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 876
-rw-r--r--  drivers/accel/ivpu/ivpu_job.h | 63
-rw-r--r--  drivers/accel/ivpu/ivpu_jsm_msg.c | 331
-rw-r--r--  drivers/accel/ivpu/ivpu_jsm_msg.h | 24
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c | 236
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.h | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.c | 252
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.h | 15
-rw-r--r--  drivers/accel/ivpu/ivpu_ms.c | 342
-rw-r--r--  drivers/accel/ivpu/ivpu_ms.h | 36
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 270
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.h | 12
-rw-r--r--  drivers/accel/ivpu/ivpu_sysfs.c | 159
-rw-r--r--  drivers/accel/ivpu/ivpu_sysfs.h | 13
-rw-r--r--  drivers/accel/ivpu/ivpu_trace.h | 73
-rw-r--r--  drivers/accel/ivpu/ivpu_trace_points.c | 9
-rw-r--r--  drivers/accel/ivpu/vpu_boot_api.h | 70
-rw-r--r--  drivers/accel/ivpu/vpu_jsm_api.h | 883
-rw-r--r--  drivers/accel/qaic/Kconfig | 2
-rw-r--r--  drivers/accel/qaic/Makefile | 3
-rw-r--r--  drivers/accel/qaic/mhi_controller.c | 392
-rw-r--r--  drivers/accel/qaic/mhi_controller.h | 2
-rw-r--r--  drivers/accel/qaic/qaic.h | 66
-rw-r--r--  drivers/accel/qaic/qaic_control.c | 29
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 200
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.c | 50
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 231
-rw-r--r--  drivers/accel/qaic/qaic_ras.c | 642
-rw-r--r--  drivers/accel/qaic/qaic_ras.h | 10
-rw-r--r--  drivers/accel/qaic/qaic_ssr.c | 815
-rw-r--r--  drivers/accel/qaic/qaic_ssr.h | 17
-rw-r--r--  drivers/accel/qaic/qaic_sysfs.c | 109
-rw-r--r--  drivers/accel/qaic/qaic_timesync.c | 15
-rw-r--r--  drivers/accel/qaic/qaic_timesync.h | 3
-rw-r--r--  drivers/accel/qaic/sahara.c | 560
-rw-r--r--  drivers/accel/rocket/Kconfig | 24
-rw-r--r--  drivers/accel/rocket/Makefile | 10
-rw-r--r--  drivers/accel/rocket/rocket_core.c | 110
-rw-r--r--  drivers/accel/rocket/rocket_core.h | 64
-rw-r--r--  drivers/accel/rocket/rocket_device.c | 60
-rw-r--r--  drivers/accel/rocket/rocket_device.h | 30
-rw-r--r--  drivers/accel/rocket/rocket_drv.c | 290
-rw-r--r--  drivers/accel/rocket/rocket_drv.h | 32
-rw-r--r--  drivers/accel/rocket/rocket_gem.c | 182
-rw-r--r--  drivers/accel/rocket/rocket_gem.h | 34
-rw-r--r--  drivers/accel/rocket/rocket_job.c | 637
-rw-r--r--  drivers/accel/rocket/rocket_job.h | 52
-rw-r--r--  drivers/accel/rocket/rocket_registers.h | 4404
-rw-r--r--  drivers/accessibility/speakup/Makefile | 4
-rw-r--r--  drivers/accessibility/speakup/devsynth.c | 59
-rw-r--r--  drivers/accessibility/speakup/genmap.c | 1
-rw-r--r--  drivers/accessibility/speakup/main.c | 20
-rw-r--r--  drivers/accessibility/speakup/makemapdata.c | 1
-rw-r--r--  drivers/accessibility/speakup/speakup.h | 2
-rw-r--r--  drivers/accessibility/speakup/synth.c | 94
-rw-r--r--  drivers/acpi/Kconfig | 23
-rw-r--r--  drivers/acpi/Makefile | 8
-rw-r--r--  drivers/acpi/ac.c | 12
-rw-r--r--  drivers/acpi/acpi_apd.c | 8
-rw-r--r--  drivers/acpi/acpi_dbg.c | 26
-rw-r--r--  drivers/acpi/acpi_extlog.c | 17
-rw-r--r--  drivers/acpi/acpi_lpit.c | 2
-rw-r--r--  drivers/acpi/acpi_mrrm.c | 211
-rw-r--r--  drivers/acpi/acpi_pad.c | 38
-rw-r--r--  drivers/acpi/acpi_pcc.c | 13
-rw-r--r--  drivers/acpi/acpi_pnp.c | 4
-rw-r--r--  drivers/acpi/acpi_processor.c | 157
-rw-r--r--  drivers/acpi/acpi_tad.c | 81
-rw-r--r--  drivers/acpi/acpi_video.c | 75
-rw-r--r--  drivers/acpi/acpica/acapps.h | 4
-rw-r--r--  drivers/acpi/acpica/accommon.h | 2
-rw-r--r--  drivers/acpi/acpica/acconvert.h | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 2
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 2
-rw-r--r--  drivers/acpi/acpica/acevents.h | 4
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 8
-rw-r--r--  drivers/acpi/acpica/achware.h | 4
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 5
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 10
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r--  drivers/acpi/acpica/acobject.h | 2
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 2
-rw-r--r--  drivers/acpi/acpica/acparser.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 5
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 10
-rw-r--r--  drivers/acpi/acpica/dbconvert.c | 2
-rw-r--r--  drivers/acpi/acpica/dbhistry.c | 2
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 2
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r--  drivers/acpi/acpica/dsdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 2
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 26
-rw-r--r--  drivers/acpi/acpica/dsmthdat.c | 1
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 9
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/dswscope.c | 2
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 2
-rw-r--r--  drivers/acpi/acpica/evglock.c | 6
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r--  drivers/acpi/acpica/evhandler.c | 2
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 8
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r--  drivers/acpi/acpica/evxface.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 14
-rw-r--r--  drivers/acpi/acpica/exconcat.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 64
-rw-r--r--  drivers/acpi/acpica/excreate.c | 2
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/exdump.c | 2
-rw-r--r--  drivers/acpi/acpica/exfield.c | 2
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 2
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/exmutex.c | 2
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r--  drivers/acpi/acpica/exprep.c | 5
-rw-r--r--  drivers/acpi/acpica/exregion.c | 25
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 2
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 2
-rw-r--r--  drivers/acpi/acpica/exresop.c | 2
-rw-r--r--  drivers/acpi/acpica/exserial.c | 8
-rw-r--r--  drivers/acpi/acpica/exstore.c | 2
-rw-r--r--  drivers/acpi/acpica/exstoren.c | 2
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 2
-rw-r--r--  drivers/acpi/acpica/exsystem.c | 13
-rw-r--r--  drivers/acpi/acpica/extrace.c | 57
-rw-r--r--  drivers/acpi/acpica/exutils.c | 2
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 17
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 2
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 2
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 2
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 4
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/nswalk.c | 11
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r--  drivers/acpi/acpica/psargs.c | 49
-rw-r--r--  drivers/acpi/acpica/psloop.c | 2
-rw-r--r--  drivers/acpi/acpica/psobject.c | 54
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 6
-rw-r--r--  drivers/acpi/acpica/psparse.c | 2
-rw-r--r--  drivers/acpi/acpica/psscope.c | 2
-rw-r--r--  drivers/acpi/acpica/pstree.c | 2
-rw-r--r--  drivers/acpi/acpica/psutils.c | 2
-rw-r--r--  drivers/acpi/acpica/pswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/psxface.c | 2
-rw-r--r--  drivers/acpi/acpica/rsaddr.c | 12
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 22
-rw-r--r--  drivers/acpi/acpica/rsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/rslist.c | 12
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 6
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 16
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 4
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 2
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/utascii.c | 2
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r--  drivers/acpi/acpica/utcache.c | 4
-rw-r--r--  drivers/acpi/acpica/utcksum.c | 2
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 2
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 2
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 6
-rw-r--r--  drivers/acpi/acpica/uteval.c | 2
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/uthex.c | 2
-rw-r--r--  drivers/acpi/acpica/utids.c | 2
-rw-r--r--  drivers/acpi/acpica/utinit.c | 4
-rw-r--r--  drivers/acpi/acpica/utlock.c | 2
-rw-r--r--  drivers/acpi/acpica/utobject.c | 2
-rw-r--r--  drivers/acpi/acpica/utosi.c | 3
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 2
-rw-r--r--  drivers/acpi/acpica/utprint.c | 9
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 14
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utuuid.c | 2
-rw-r--r--  drivers/acpi/acpica/utxface.c | 2
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 26
-rw-r--r--  drivers/acpi/apei/Kconfig | 1
-rw-r--r--  drivers/acpi/apei/apei-base.c | 2
-rw-r--r--  drivers/acpi/apei/apei-internal.h | 2
-rw-r--r--  drivers/acpi/apei/einj-core.c | 533
-rw-r--r--  drivers/acpi/apei/einj-cxl.c | 14
-rw-r--r--  drivers/acpi/apei/erst-dbg.c | 9
-rw-r--r--  drivers/acpi/apei/ghes.c | 286
-rw-r--r--  drivers/acpi/arm64/Kconfig | 3
-rw-r--r--  drivers/acpi/arm64/Makefile | 7
-rw-r--r--  drivers/acpi/arm64/agdi.c | 2
-rw-r--r--  drivers/acpi/arm64/amba.c | 14
-rw-r--r--  drivers/acpi/arm64/cpuidle.c | 70
-rw-r--r--  drivers/acpi/arm64/dma.c | 5
-rw-r--r--  drivers/acpi/arm64/ffh.c | 107
-rw-r--r--  drivers/acpi/arm64/gtdt.c | 106
-rw-r--r--  drivers/acpi/arm64/init.c | 2
-rw-r--r--  drivers/acpi/arm64/iort.c | 47
-rw-r--r--  drivers/acpi/arm64/mpam.c | 411
-rw-r--r--  drivers/acpi/battery.c | 220
-rw-r--r--  drivers/acpi/bgrt.c | 11
-rw-r--r--  drivers/acpi/bus.c | 21
-rw-r--r--  drivers/acpi/button.c | 33
-rw-r--r--  drivers/acpi/cppc_acpi.c | 446
-rw-r--r--  drivers/acpi/device_pm.c | 12
-rw-r--r--  drivers/acpi/device_sysfs.c | 196
-rw-r--r--  drivers/acpi/dptf/Makefile | 1
-rw-r--r--  drivers/acpi/dptf/dptf_pch_fivr.c | 5
-rw-r--r--  drivers/acpi/dptf/dptf_power.c | 8
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 81
-rw-r--r--  drivers/acpi/ec.c | 158
-rw-r--r--  drivers/acpi/event.c | 4
-rw-r--r--  drivers/acpi/evged.c | 2
-rw-r--r--  drivers/acpi/fan.h | 56
-rw-r--r--  drivers/acpi/fan_attr.c | 47
-rw-r--r--  drivers/acpi/fan_core.c | 302
-rw-r--r--  drivers/acpi/fan_hwmon.c | 180
-rw-r--r--  drivers/acpi/hed.c | 7
-rw-r--r--  drivers/acpi/internal.h | 42
-rw-r--r--  drivers/acpi/irq.c | 35
-rw-r--r--  drivers/acpi/mipi-disco-img.c | 31
-rw-r--r--  drivers/acpi/nfit/core.c | 12
-rw-r--r--  drivers/acpi/nfit/intel.c | 119
-rw-r--r--  drivers/acpi/numa/Kconfig | 5
-rw-r--r--  drivers/acpi/numa/hmat.c | 182
-rw-r--r--  drivers/acpi/numa/srat.c | 164
-rw-r--r--  drivers/acpi/osi.c | 1
-rw-r--r--  drivers/acpi/osl.c | 40
-rw-r--r--  drivers/acpi/pci_irq.c | 5
-rw-r--r--  drivers/acpi/pci_link.c | 18
-rw-r--r--  drivers/acpi/pci_mcfg.c | 12
-rw-r--r--  drivers/acpi/pci_root.c | 23
-rw-r--r--  drivers/acpi/pfr_telemetry.c | 5
-rw-r--r--  drivers/acpi/pfr_update.c | 67
-rw-r--r--  drivers/acpi/platform_profile.c | 716
-rw-r--r--  drivers/acpi/pmic/intel_pmic.c | 2
-rw-r--r--  drivers/acpi/pmic/intel_pmic.h | 4
-rw-r--r--  drivers/acpi/pmic/intel_pmic_bxtwc.c | 4
-rw-r--r--  drivers/acpi/pmic/intel_pmic_bytcrc.c | 4
-rw-r--r--  drivers/acpi/pmic/intel_pmic_chtdc_ti.c | 17
-rw-r--r--  drivers/acpi/pmic/intel_pmic_chtwc.c | 7
-rw-r--r--  drivers/acpi/pmic/intel_pmic_xpower.c | 11
-rw-r--r--  drivers/acpi/pmic/tps68470_pmic.c | 6
-rw-r--r--  drivers/acpi/power.c | 99
-rw-r--r--  drivers/acpi/pptt.c | 295
-rw-r--r--  drivers/acpi/prmt.c | 92
-rw-r--r--  drivers/acpi/proc.c | 17
-rw-r--r--  drivers/acpi/processor_core.c | 20
-rw-r--r--  drivers/acpi/processor_driver.c | 55
-rw-r--r--  drivers/acpi/processor_idle.c | 156
-rw-r--r--  drivers/acpi/processor_perflib.c | 25
-rw-r--r--  drivers/acpi/processor_thermal.c | 52
-rw-r--r--  drivers/acpi/processor_throttling.c | 7
-rw-r--r--  drivers/acpi/property.c | 344
-rw-r--r--  drivers/acpi/resource.c | 107
-rw-r--r--  drivers/acpi/riscv/Kconfig | 7
-rw-r--r--  drivers/acpi/riscv/Makefile | 3
-rw-r--r--  drivers/acpi/riscv/cppc.c | 6
-rw-r--r--  drivers/acpi/riscv/init.c | 15
-rw-r--r--  drivers/acpi/riscv/init.h | 5
-rw-r--r--  drivers/acpi/riscv/irq.c | 404
-rw-r--r--  drivers/acpi/riscv/rimt.c | 520
-rw-r--r--  drivers/acpi/sbs.c | 39
-rw-r--r--  drivers/acpi/sbshc.c | 13
-rw-r--r--  drivers/acpi/scan.c | 232
-rw-r--r--  drivers/acpi/sleep.c | 28
-rw-r--r--  drivers/acpi/sleep.h | 3
-rw-r--r--  drivers/acpi/spcr.c | 13
-rw-r--r--  drivers/acpi/sysfs.c | 4
-rw-r--r--  drivers/acpi/tables.c | 36
-rw-r--r--  drivers/acpi/thermal.c | 105
-rw-r--r--  drivers/acpi/thermal_lib.c | 8
-rw-r--r--  drivers/acpi/utils.c | 26
-rw-r--r--  drivers/acpi/video_detect.c | 88
-rw-r--r--  drivers/acpi/viot.c | 13
-rw-r--r--  drivers/acpi/wakeup.c | 4
-rw-r--r--  drivers/acpi/x86/lpss.c | 9
-rw-r--r--  drivers/acpi/x86/s2idle.c | 65
-rw-r--r--  drivers/acpi/x86/utils.c | 164
-rw-r--r--  drivers/amba/Kconfig | 2
-rw-r--r--  drivers/amba/bus.c | 28
-rw-r--r--  drivers/amba/tegra-ahb.c | 1
-rw-r--r--  drivers/android/Kconfig | 31
-rw-r--r--  drivers/android/Makefile | 5
-rw-r--r--  drivers/android/binder.c | 987
-rw-r--r--  drivers/android/binder/Makefile | 9
-rw-r--r--  drivers/android/binder/allocation.rs | 602
-rw-r--r--  drivers/android/binder/context.rs | 180
-rw-r--r--  drivers/android/binder/deferred_close.rs | 204
-rw-r--r--  drivers/android/binder/defs.rs | 182
-rw-r--r--  drivers/android/binder/error.rs | 100
-rw-r--r--  drivers/android/binder/freeze.rs | 398
-rw-r--r--  drivers/android/binder/node.rs | 1131
-rw-r--r--  drivers/android/binder/node/wrapper.rs | 78
-rw-r--r--  drivers/android/binder/page_range.rs | 734
-rw-r--r--  drivers/android/binder/page_range_helper.c | 24
-rw-r--r--  drivers/android/binder/page_range_helper.h | 15
-rw-r--r--  drivers/android/binder/process.rs | 1745
-rw-r--r--  drivers/android/binder/range_alloc/array.rs | 251
-rw-r--r--  drivers/android/binder/range_alloc/mod.rs | 329
-rw-r--r--  drivers/android/binder/range_alloc/tree.rs | 488
-rw-r--r--  drivers/android/binder/rust_binder.h | 23
-rw-r--r--  drivers/android/binder/rust_binder_events.c | 59
-rw-r--r--  drivers/android/binder/rust_binder_events.h | 36
-rw-r--r--  drivers/android/binder/rust_binder_internal.h | 87
-rw-r--r--  drivers/android/binder/rust_binder_main.rs | 611
-rw-r--r--  drivers/android/binder/rust_binderfs.c | 795
-rw-r--r--  drivers/android/binder/stats.rs | 89
-rw-r--r--  drivers/android/binder/thread.rs | 1596
-rw-r--r--  drivers/android/binder/trace.rs | 16
-rw-r--r--  drivers/android/binder/transaction.rs | 456
-rw-r--r--  drivers/android/binder_alloc.c | 417
-rw-r--r--  drivers/android/binder_alloc.h | 65
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 306
-rw-r--r--  drivers/android/binder_internal.h | 54
-rw-r--r--  drivers/android/binder_netlink.c | 32
-rw-r--r--  drivers/android/binder_netlink.h | 21
-rw-r--r--  drivers/android/binder_trace.h | 60
-rw-r--r--  drivers/android/binderfs.c | 124
-rw-r--r--  drivers/android/dbitmap.h | 169
-rw-r--r--  drivers/android/tests/.kunitconfig | 7
-rw-r--r--  drivers/android/tests/Makefile | 6
-rw-r--r--  drivers/android/tests/binder_alloc_kunit.c | 572
-rw-r--r--  drivers/ata/Kconfig | 38
-rw-r--r--  drivers/ata/acard-ahci.c | 6
-rw-r--r--  drivers/ata/ahci.c | 191
-rw-r--r--  drivers/ata/ahci.h | 23
-rw-r--r--  drivers/ata/ahci_brcm.c | 9
-rw-r--r--  drivers/ata/ahci_ceva.c | 8
-rw-r--r--  drivers/ata/ahci_da850.c | 8
-rw-r--r--  drivers/ata/ahci_dm816.c | 4
-rw-r--r--  drivers/ata/ahci_dwc.c | 2
-rw-r--r--  drivers/ata/ahci_imx.c | 421
-rw-r--r--  drivers/ata/ahci_mtk.c | 2
-rw-r--r--  drivers/ata/ahci_mvebu.c | 2
-rw-r--r--  drivers/ata/ahci_platform.c | 2
-rw-r--r--  drivers/ata/ahci_qoriq.c | 6
-rw-r--r--  drivers/ata/ahci_seattle.c | 2
-rw-r--r--  drivers/ata/ahci_st.c | 8
-rw-r--r--  drivers/ata/ahci_sunxi.c | 2
-rw-r--r--  drivers/ata/ahci_tegra.c | 2
-rw-r--r--  drivers/ata/ahci_xgene.c | 19
-rw-r--r--  drivers/ata/ata_generic.c | 2
-rw-r--r--  drivers/ata/ata_piix.c | 8
-rw-r--r--  drivers/ata/libahci.c | 30
-rw-r--r--  drivers/ata/libahci_platform.c | 57
-rw-r--r--  drivers/ata/libata-acpi.c | 95
-rw-r--r--  drivers/ata/libata-core.c | 1167
-rw-r--r--  drivers/ata/libata-eh.c | 527
-rw-r--r--  drivers/ata/libata-pmp.c | 29
-rw-r--r--  drivers/ata/libata-sata.c | 273
-rw-r--r--  drivers/ata/libata-scsi.c | 898
-rw-r--r--  drivers/ata/libata-sff.c | 54
-rw-r--r--  drivers/ata/libata-transport.c | 314
-rw-r--r--  drivers/ata/libata-transport.h | 3
-rw-r--r--  drivers/ata/libata-zpodd.c | 5
-rw-r--r--  drivers/ata/libata.h | 57
-rw-r--r--  drivers/ata/pata_acpi.c | 2
-rw-r--r--  drivers/ata/pata_ali.c | 10
-rw-r--r--  drivers/ata/pata_amd.c | 4
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 2
-rw-r--r--  drivers/ata/pata_artop.c | 4
-rw-r--r--  drivers/ata/pata_atiixp.c | 2
-rw-r--r--  drivers/ata/pata_atp867x.c | 2
-rw-r--r--  drivers/ata/pata_cs5520.c | 12
-rw-r--r--  drivers/ata/pata_cs5536.c | 2
-rw-r--r--  drivers/ata/pata_efar.c | 2
-rw-r--r--  drivers/ata/pata_ep93xx.c | 115
-rw-r--r--  drivers/ata/pata_falcon.c | 4
-rw-r--r--  drivers/ata/pata_ftide010.c | 3
-rw-r--r--  drivers/ata/pata_gayle.c | 6
-rw-r--r--  drivers/ata/pata_hpt366.c | 12
-rw-r--r--  drivers/ata/pata_hpt37x.c | 14
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 2
-rw-r--r--  drivers/ata/pata_icside.c | 4
-rw-r--r--  drivers/ata/pata_imx.c | 2
-rw-r--r--  drivers/ata/pata_it8213.c | 4
-rw-r--r--  drivers/ata/pata_it821x.c | 9
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 3
-rw-r--r--  drivers/ata/pata_jmicron.c | 2
-rw-r--r--  drivers/ata/pata_macio.c | 45
-rw-r--r--  drivers/ata/pata_marvell.c | 2
-rw-r--r--  drivers/ata/pata_mpc52xx.c | 3
-rw-r--r--  drivers/ata/pata_mpiix.c | 2
-rw-r--r--  drivers/ata/pata_ns87410.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 10
-rw-r--r--  drivers/ata/pata_of_platform.c | 2
-rw-r--r--  drivers/ata/pata_oldpiix.c | 4
-rw-r--r--  drivers/ata/pata_opti.c | 2
-rw-r--r--  drivers/ata/pata_optidma.c | 6
-rw-r--r--  drivers/ata/pata_parport/pata_parport.c | 5
-rw-r--r--  drivers/ata/pata_pcmcia.c | 5
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 16
-rw-r--r--  drivers/ata/pata_piccolo.c | 2
-rw-r--r--  drivers/ata/pata_platform.c | 2
-rw-r--r--  drivers/ata/pata_pxa.c | 8
-rw-r--r--  drivers/ata/pata_radisys.c | 2
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 2
-rw-r--r--  drivers/ata/pata_rdc.c | 8
-rw-r--r--  drivers/ata/pata_serverworks.c | 16
-rw-r--r--  drivers/ata/pata_sis.c | 2
-rw-r--r--  drivers/ata/pata_sl82c105.c | 2
-rw-r--r--  drivers/ata/pata_triflex.c | 2
-rw-r--r--  drivers/ata/pata_via.c | 11
-rw-r--r--  drivers/ata/pdc_adma.c | 2
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 4
-rw-r--r--  drivers/ata/sata_fsl.c | 8
-rw-r--r--  drivers/ata/sata_gemini.c | 35
-rw-r--r--  drivers/ata/sata_gemini.h | 1
-rw-r--r--  drivers/ata/sata_highbank.c | 15
-rw-r--r--  drivers/ata/sata_inic162x.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 16
-rw-r--r--  drivers/ata/sata_nv.c | 30
-rw-r--r--  drivers/ata/sata_promise.c | 4
-rw-r--r--  drivers/ata/sata_qstor.c | 4
-rw-r--r--  drivers/ata/sata_rcar.c | 4
-rw-r--r--  drivers/ata/sata_sil.c | 16
-rw-r--r--  drivers/ata/sata_sil24.c | 13
-rw-r--r--  drivers/ata/sata_sis.c | 2
-rw-r--r--  drivers/ata/sata_svw.c | 4
-rw-r--r--  drivers/ata/sata_sx4.c | 45
-rw-r--r--  drivers/ata/sata_uli.c | 4
-rw-r--r--  drivers/ata/sata_via.c | 7
-rw-r--r--  drivers/ata/sata_vsc.c | 2
-rw-r--r--  drivers/atm/atmtcp.c | 15
-rw-r--r--  drivers/atm/fore200e.c | 4
-rw-r--r--  drivers/atm/idt77105.c | 4
-rw-r--r--  drivers/atm/idt77252.c | 18
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/atm/lanai.c | 6
-rw-r--r--  drivers/atm/nicstar.c | 2
-rw-r--r--  drivers/atm/suni.c | 2
-rw-r--r--  drivers/auxdisplay/Kconfig | 5
-rw-r--r--  drivers/auxdisplay/arm-charlcd.c | 2
-rw-r--r--  drivers/auxdisplay/cfag12864b.c | 12
-rw-r--r--  drivers/auxdisplay/cfag12864bfb.c | 2
-rw-r--r--  drivers/auxdisplay/charlcd.c | 6
-rw-r--r--  drivers/auxdisplay/charlcd.h | 7
-rw-r--r--  drivers/auxdisplay/hd44780.c | 23
-rw-r--r--  drivers/auxdisplay/hd44780_common.c | 25
-rw-r--r--  drivers/auxdisplay/hd44780_common.h | 4
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 30
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c | 12
-rw-r--r--  drivers/auxdisplay/ks0108.c | 1
-rw-r--r--  drivers/auxdisplay/lcd2s.c | 14
-rw-r--r--  drivers/auxdisplay/line-display.c | 263
-rw-r--r--  drivers/auxdisplay/line-display.h | 4
-rw-r--r--  drivers/auxdisplay/max6959.c | 2
-rw-r--r--  drivers/auxdisplay/panel.c | 22
-rw-r--r--  drivers/auxdisplay/seg-led-gpio.c | 7
-rw-r--r--  drivers/base/Kconfig | 7
-rw-r--r--  drivers/base/Makefile | 3
-rw-r--r--  drivers/base/arch_numa.c | 230
-rw-r--r--  drivers/base/arch_topology.c | 327
-rw-r--r--  drivers/base/attribute_container.c | 48
-rw-r--r--  drivers/base/auxiliary.c | 187
-rw-r--r--  drivers/base/auxiliary_sysfs.c | 113
-rw-r--r--  drivers/base/base.h | 68
-rw-r--r--  drivers/base/bus.c | 82
-rw-r--r--  drivers/base/cacheinfo.c | 120
-rw-r--r--  drivers/base/class.c | 60
-rw-r--r--  drivers/base/component.c | 32
-rw-r--r--  drivers/base/core.c | 732
-rw-r--r--  drivers/base/cpu.c | 59
-rw-r--r--  drivers/base/dd.c | 50
-rw-r--r--  drivers/base/devcoredump.c | 175
-rw-r--r--  drivers/base/devres.c | 125
-rw-r--r--  drivers/base/devtmpfs.c | 185
-rw-r--r--  drivers/base/driver.c | 21
-rw-r--r--  drivers/base/faux.c | 261
-rw-r--r--  drivers/base/firmware_loader/Kconfig | 11
-rw-r--r--  drivers/base/firmware_loader/builtin/main.c | 2
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c | 8
-rw-r--r--  drivers/base/firmware_loader/main.c | 219
-rw-r--r--  drivers/base/firmware_loader/sysfs.c | 18
-rw-r--r--  drivers/base/firmware_loader/sysfs.h | 2
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.c | 6
-rw-r--r--  drivers/base/init.c | 1
-rw-r--r--  drivers/base/isa.c | 2
-rw-r--r--  drivers/base/memory.c | 210
-rw-r--r--  drivers/base/module.c | 63
-rw-r--r--  drivers/base/node.c | 276
-rw-r--r--  drivers/base/physical_location.c | 9
-rw-r--r--  drivers/base/platform-msi.c | 351
-rw-r--r--  drivers/base/platform.c | 115
-rw-r--r--  drivers/base/power/Makefile | 1
-rw-r--r--  drivers/base/power/clock_ops.c | 73
-rw-r--r--  drivers/base/power/common.c | 98
-rw-r--r--  drivers/base/power/generic_ops.c | 109
-rw-r--r--  drivers/base/power/main.c | 589
-rw-r--r--  drivers/base/power/qos.c | 1
-rw-r--r--  drivers/base/power/runtime-test.c | 249
-rw-r--r--  drivers/base/power/runtime.c | 336
-rw-r--r--  drivers/base/power/sysfs.c | 33
-rw-r--r--  drivers/base/power/trace.c | 4
-rw-r--r--  drivers/base/power/wakeirq.c | 26
-rw-r--r--  drivers/base/power/wakeup.c | 44
-rw-r--r--  drivers/base/power/wakeup_stats.c | 2
-rw-r--r--  drivers/base/property.c | 93
-rw-r--r--  drivers/base/regmap/Kconfig | 4
-rw-r--r--  drivers/base/regmap/internal.h | 15
-rw-r--r--  drivers/base/regmap/regcache-flat.c | 107
-rw-r--r--  drivers/base/regmap/regcache-maple.c | 67
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 43
-rw-r--r--  drivers/base/regmap/regcache.c | 49
-rw-r--r--  drivers/base/regmap/regmap-ac97.c | 1
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 10
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 4
-rw-r--r--  drivers/base/regmap/regmap-i3c.c | 9
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 181
-rw-r--r--  drivers/base/regmap/regmap-kunit.c | 253
-rw-r--r--  drivers/base/regmap/regmap-mmio.c | 1
-rw-r--r--  drivers/base/regmap/regmap-ram.c | 1
-rw-r--r--  drivers/base/regmap/regmap-raw-ram.c | 1
-rw-r--r--  drivers/base/regmap/regmap-sccb.c | 1
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c | 229
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c | 7
-rw-r--r--  drivers/base/regmap/regmap-spi-avmm.c | 1
-rw-r--r--  drivers/base/regmap/regmap-spi.c | 3
-rw-r--r--  drivers/base/regmap/regmap-spmi.c | 1
-rw-r--r--  drivers/base/regmap/regmap-w1.c | 1
-rw-r--r--  drivers/base/regmap/regmap.c | 149
-rw-r--r--  drivers/base/regmap/trace.h | 18
-rw-r--r--  drivers/base/swnode.c | 41
-rw-r--r--  drivers/base/syscore.c | 82
-rw-r--r--  drivers/base/test/Kconfig | 1
-rw-r--r--  drivers/base/test/platform-device-test.c | 41
-rw-r--r--  drivers/base/topology.c | 114
-rw-r--r--  drivers/base/trace.h | 8
-rw-r--r--  drivers/bcma/driver_gpio.c | 6
-rw-r--r--  drivers/bcma/driver_pci_host.c | 10
-rw-r--r--  drivers/bcma/host_soc.c | 2
-rw-r--r--  drivers/bcma/main.c | 12
-rw-r--r--  drivers/block/Kconfig | 73
-rw-r--r--  drivers/block/Makefile | 3
-rw-r--r--  drivers/block/amiflop.c | 27
-rw-r--r--  drivers/block/aoe/aoe.h | 1
-rw-r--r--  drivers/block/aoe/aoeblk.c | 6
-rw-r--r--  drivers/block/aoe/aoecmd.c | 27
-rw-r--r--  drivers/block/aoe/aoedev.c | 22
-rw-r--r--  drivers/block/aoe/aoemain.c | 4
-rw-r--r--  drivers/block/aoe/aoenet.c | 2
-rw-r--r--  drivers/block/ataflop.c | 24
-rw-r--r--  drivers/block/brd.c | 339
-rw-r--r--  drivers/block/drbd/Kconfig | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 10
-rw-r--r--  drivers/block/drbd/drbd_int.h | 51
-rw-r--r--  drivers/block/drbd/drbd_main.c | 96
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 5
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 286
-rw-r--r--  drivers/block/drbd/drbd_req.c | 3
-rw-r--r--  drivers/block/drbd/drbd_state.c | 2
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 62
-rw-r--r--  drivers/block/floppy.c | 76
-rw-r--r--  drivers/block/loop.c | 702
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 78
-rw-r--r--  drivers/block/n64cart.c | 2
-rw-r--r--  drivers/block/nbd.c | 323
-rw-r--r--  drivers/block/null_blk/main.c | 373
-rw-r--r--  drivers/block/null_blk/null_blk.h | 11
-rw-r--r--  drivers/block/null_blk/trace.h | 7
-rw-r--r--  drivers/block/null_blk/zoned.c | 56
-rw-r--r--  drivers/block/pktcdvd.c | 2923
-rw-r--r--  drivers/block/ps3disk.c | 19
-rw-r--r--  drivers/block/rbd.c | 57
-rw-r--r--  drivers/block/rnbd/rnbd-clt-sysfs.c | 2
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 27
-rw-r--r--  drivers/block/rnbd/rnbd-proto.h | 15
-rw-r--r--  drivers/block/rnbd/rnbd-srv-sysfs.c | 4
-rw-r--r--  drivers/block/rnbd/rnbd-srv-trace.h | 12
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 14
-rw-r--r--  drivers/block/rnull/Kconfig | 13
-rw-r--r--  drivers/block/rnull/Makefile | 3
-rw-r--r--  drivers/block/rnull/configfs.rs | 263
-rw-r--r--  drivers/block/rnull/rnull.rs | 103
-rw-r--r--  drivers/block/sunvdc.c | 29
-rw-r--r--  drivers/block/swim.c | 13
-rw-r--r--  drivers/block/swim3.c | 31
-rw-r--r--  drivers/block/ublk_drv.c | 2456
-rw-r--r--  drivers/block/virtio_blk.c | 231
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 3
-rw-r--r--  drivers/block/xen-blkfront.c | 80
-rw-r--r--  drivers/block/z2ram.c | 2
-rw-r--r--  drivers/block/zloop.c | 1507
-rw-r--r--  drivers/block/zram/Kconfig | 77
-rw-r--r--  drivers/block/zram/Makefile | 8
-rw-r--r--  drivers/block/zram/backend_842.c | 61
-rw-r--r--  drivers/block/zram/backend_842.h | 10
-rw-r--r--  drivers/block/zram/backend_deflate.c | 148
-rw-r--r--  drivers/block/zram/backend_deflate.h | 10
-rw-r--r--  drivers/block/zram/backend_lz4.c | 127
-rw-r--r--  drivers/block/zram/backend_lz4.h | 10
-rw-r--r--  drivers/block/zram/backend_lz4hc.c | 128
-rw-r--r--  drivers/block/zram/backend_lz4hc.h | 10
-rw-r--r--  drivers/block/zram/backend_lzo.c | 59
-rw-r--r--  drivers/block/zram/backend_lzo.h | 10
-rw-r--r--  drivers/block/zram/backend_lzorle.c | 59
-rw-r--r--  drivers/block/zram/backend_lzorle.h | 10
-rw-r--r--  drivers/block/zram/backend_zstd.c | 217
-rw-r--r--  drivers/block/zram/backend_zstd.h | 10
-rw-r--r--  drivers/block/zram/zcomp.c | 249
-rw-r--r--  drivers/block/zram/zcomp.h | 88
-rw-r--r--  drivers/block/zram/zram_drv.c | 1719
-rw-r--r--  drivers/block/zram/zram_drv.h | 31
-rw-r--r--  drivers/bluetooth/Kconfig | 34
-rw-r--r--  drivers/bluetooth/Makefile | 1
-rw-r--r--  drivers/bluetooth/ath3k.c | 2
-rw-r--r--  drivers/bluetooth/bfusb.c | 5
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 6
-rw-r--r--  drivers/bluetooth/bpa10x.c | 8
-rw-r--r--  drivers/bluetooth/btbcm.c | 21
-rw-r--r--  drivers/bluetooth/btintel.c | 858
-rw-r--r--  drivers/bluetooth/btintel.h | 41
-rw-r--r--  drivers/bluetooth/btintel_pcie.c | 1648
-rw-r--r--  drivers/bluetooth/btintel_pcie.h | 148
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 3
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 7
-rw-r--r--  drivers/bluetooth/btmtk.c | 1101
-rw-r--r--  drivers/bluetooth/btmtk.h | 118
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 70
-rw-r--r--  drivers/bluetooth/btmtkuart.c | 15
-rw-r--r--  drivers/bluetooth/btnxpuart.c | 843
-rw-r--r--  drivers/bluetooth/btqca.c | 229
-rw-r--r--  drivers/bluetooth/btqca.h | 9
-rw-r--r--  drivers/bluetooth/btqcomsmd.c | 4
-rw-r--r--  drivers/bluetooth/btrsi.c | 2
-rw-r--r--  drivers/bluetooth/btrtl.c | 92
-rw-r--r--  drivers/bluetooth/btsdio.c | 3
-rw-r--r--  drivers/bluetooth/btusb.c | 1734
-rw-r--r--  drivers/bluetooth/h4_recv.h | 146
-rw-r--r--  drivers/bluetooth/hci_ag6xx.c | 2
-rw-r--r--  drivers/bluetooth/hci_aml.c | 754
-rw-r--r--  drivers/bluetooth/hci_ath.c | 2
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 39
-rw-r--r--  drivers/bluetooth/hci_bcm4377.c | 88
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 9
-rw-r--r--  drivers/bluetooth/hci_h4.c | 8
-rw-r--r--  drivers/bluetooth/hci_h5.c | 61
-rw-r--r--  drivers/bluetooth/hci_intel.c | 19
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 40
-rw-r--r--  drivers/bluetooth/hci_ll.c | 8
-rw-r--r--  drivers/bluetooth/hci_mrvl.c | 6
-rw-r--r--  drivers/bluetooth/hci_nokia.c | 15
-rw-r--r--  drivers/bluetooth/hci_qca.c | 267
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 8
-rw-r--r--  drivers/bluetooth/hci_uart.h | 19
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 85
-rw-r--r--  drivers/bluetooth/virtio_bt.c | 24
-rw-r--r--  drivers/bus/Kconfig | 8
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 1
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 10
-rw-r--r--  drivers/bus/bt1-apb.c | 23
-rw-r--r--  drivers/bus/bt1-axi.c | 23
-rw-r--r--  drivers/bus/fsl-mc/dpmcp.c | 22
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 10
-rw-r--r--  drivers/bus/fsl-mc/dprc.c | 4
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c | 26
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 83
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 8
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-uapi.c | 11
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 39
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c | 4
-rw-r--r--  drivers/bus/hisi_lpc.c | 2
-rw-r--r--  drivers/bus/imx-aipstz.c | 108
-rw-r--r--  drivers/bus/imx-weim.c | 14
-rw-r--r--  drivers/bus/mhi/ep/internal.h | 2
-rw-r--r--  drivers/bus/mhi/ep/main.c | 59
-rw-r--r--  drivers/bus/mhi/ep/ring.c | 16
-rw-r--r--  drivers/bus/mhi/host/boot.c | 216
-rw-r--r--  drivers/bus/mhi/host/debugfs.c | 3
-rw-r--r--  drivers/bus/mhi/host/init.c | 62
-rw-r--r--  drivers/bus/mhi/host/internal.h | 25
-rw-r--r--  drivers/bus/mhi/host/main.c | 66
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 526
-rw-r--r--  drivers/bus/mhi/host/pm.c | 61
-rw-r--r--  drivers/bus/mhi/host/trace.h | 37
-rw-r--r--  drivers/bus/mips_cdmm.c | 6
-rw-r--r--  drivers/bus/moxtet.c | 13
-rw-r--r--  drivers/bus/mvebu-mbus.c | 16
-rw-r--r--  drivers/bus/omap-ocp2scp.c | 2
-rw-r--r--  drivers/bus/omap_l3_smx.c | 2
-rw-r--r--  drivers/bus/qcom-ssc-block-bus.c | 36
-rw-r--r--  drivers/bus/simple-pm-bus.c | 24
-rw-r--r--  drivers/bus/stm32_rifsc.c | 597
-rw-r--r--  drivers/bus/sun50i-de2.c | 2
-rw-r--r--  drivers/bus/sunxi-rsb.c | 42
-rw-r--r--  drivers/bus/tegra-aconnect.c | 2
-rw-r--r--  drivers/bus/tegra-gmi.c | 2
-rw-r--r--  drivers/bus/ti-pwmss.c | 2
-rw-r--r--  drivers/bus/ti-sysc.c | 95
-rw-r--r--  drivers/bus/ts-nbus.c | 4
-rw-r--r--  drivers/bus/vexpress-config.c | 1
-rw-r--r--  drivers/cache/Kconfig | 45
-rw-r--r--  drivers/cache/Makefile | 7
-rw-r--r--  drivers/cache/hisi_soc_hha.c | 194
-rw-r--r--  drivers/cache/sifive_ccache.c | 10
-rw-r--r--  drivers/cache/starfive_starlink_cache.c | 130
-rw-r--r--  drivers/cdrom/cdrom.c | 24
-rw-r--r--  drivers/cdrom/gdrom.c | 5
-rw-r--r--  drivers/cdx/Kconfig | 2
-rw-r--r--  drivers/cdx/Makefile | 2
-rw-r--r--  drivers/cdx/cdx.c | 34
-rw-r--r--  drivers/cdx/cdx_msi.c | 7
-rw-r--r--  drivers/cdx/controller/Kconfig | 2
-rw-r--r--  drivers/cdx/controller/bitfield.h | 90
-rw-r--r--  drivers/cdx/controller/cdx_controller.c | 38
-rw-r--r--  drivers/cdx/controller/cdx_rpmsg.c | 5
-rw-r--r--  drivers/cdx/controller/mcdi.c | 47
-rw-r--r--  drivers/cdx/controller/mcdi.h | 242
-rw-r--r--  drivers/cdx/controller/mcdi_functions.c | 1
-rw-r--r--  drivers/cdx/controller/mcdi_functions.h | 3
-rw-r--r--  drivers/cdx/controller/mcdid.h | 63
-rw-r--r--  drivers/char/Kconfig | 5
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/adi.c | 17
-rw-r--r--  drivers/char/agp/ali-agp.c | 1
-rw-r--r--  drivers/char/agp/alpha-agp.c | 1
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 19
-rw-r--r--  drivers/char/agp/ati-agp.c | 1
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 1
-rw-r--r--  drivers/char/agp/intel-agp.c | 3
-rw-r--r--  drivers/char/agp/intel-gtt.c | 58
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 2
-rw-r--r--  drivers/char/agp/parisc-agp.c | 1
-rw-r--r--  drivers/char/agp/sis-agp.c | 1
-rw-r--r--  drivers/char/agp/sworks-agp.c | 1
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 1
-rw-r--r--  drivers/char/agp/via-agp.c | 1
-rw-r--r--  drivers/char/apm-emulation.c | 15
-rw-r--r--  drivers/char/applicom.c | 6
-rw-r--r--  drivers/char/bsr.c | 1
-rw-r--r--  drivers/char/ds1620.c | 2
-rw-r--r--  drivers/char/dsp56k.c | 1
-rw-r--r--  drivers/char/dtlk.c | 8
-rw-r--r--  drivers/char/hangcheck-timer.c | 26
-rw-r--r--  drivers/char/hpet.c | 49
-rw-r--r--  drivers/char/hw_random/Kconfig | 66
-rw-r--r--  drivers/char/hw_random/Makefile | 3
-rw-r--r--  drivers/char/hw_random/airoha-trng.c | 243
-rw-r--r--  drivers/char/hw_random/amd-rng.c | 4
-rw-r--r--  drivers/char/hw_random/arm_smccc_trng.c | 1
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 12
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 15
-rw-r--r--  drivers/char/hw_random/bcm74110-rng.c | 125
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c | 1
-rw-r--r--  drivers/char/hw_random/cavium-rng.c | 1
-rw-r--r--  drivers/char/hw_random/cctrng.c | 4
-rw-r--r--  drivers/char/hw_random/cn10k-rng.c | 2
-rw-r--r--  drivers/char/hw_random/core.c | 85
-rw-r--r--  drivers/char/hw_random/exynos-trng.c | 227
-rw-r--r--  drivers/char/hw_random/histb-rng.c | 2
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 69
-rw-r--r--  drivers/char/hw_random/ingenic-rng.c | 2
-rw-r--r--  drivers/char/hw_random/ks-sa-rng.c | 6
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 12
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c | 18
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 2
-rw-r--r--  drivers/char/hw_random/n2rng.h | 4
-rw-r--r--  drivers/char/hw_random/npcm-rng.c | 10
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 3
-rw-r--r--  drivers/char/hw_random/omap3-rom-rng.c | 2
-rw-r--r--  drivers/char/hw_random/rockchip-rng.c | 492
-rw-r--r--  drivers/char/hw_random/s390-trng.c | 3
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 112
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 7
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 1
-rw-r--r--  drivers/char/hw_random/xgene-rng.c | 6
-rw-r--r--  drivers/char/ipmi/Kconfig | 7
-rw-r--r--  drivers/char/ipmi/Makefile | 12
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 7
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 9
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 16
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 1390
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 23
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_si.h | 17
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 207
-rw-r--r--  drivers/char/ipmi/ipmi_si_ls2k.c | 189
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c | 57
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 31
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 60
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 157
-rw-r--r--  drivers/char/ipmi/kcs_bmc_aspeed.c | 8
-rw-r--r--  drivers/char/ipmi/kcs_bmc_npcm7xx.c | 6
-rw-r--r--  drivers/char/ipmi/ssif_bmc.c | 20
-rw-r--r--  drivers/char/lp.c | 2
-rw-r--r--  drivers/char/mem.c | 132
-rw-r--r--  drivers/char/misc.c | 62
-rw-r--r--  drivers/char/misc_minor_kunit.c | 689
-rw-r--r--  drivers/char/mwave/3780i.c | 218
-rw-r--r--  drivers/char/mwave/3780i.h | 12
-rw-r--r--  drivers/char/mwave/Makefile | 6
-rw-r--r--  drivers/char/mwave/README | 10
-rw-r--r--  drivers/char/mwave/mwavedd.c | 337
-rw-r--r--  drivers/char/mwave/mwavedd.h | 76
-rw-r--r--  drivers/char/mwave/mwavepub.h | 22
-rw-r--r--  drivers/char/mwave/smapi.c | 244
-rw-r--r--  drivers/char/mwave/smapi.h | 6
-rw-r--r--  drivers/char/mwave/tp3780i.c | 209
-rw-r--r--  drivers/char/mwave/tp3780i.h | 30
-rw-r--r--  drivers/char/nvram.c | 1
-rw-r--r--  drivers/char/nwbutton.c | 1
-rw-r--r--  drivers/char/nwflash.c | 1
-rw-r--r--  drivers/char/pc8736x_gpio.c | 1
-rw-r--r--  drivers/char/powernv-op-panel.c | 3
-rw-r--r--  drivers/char/ppdev.c | 18
-rw-r--r--  drivers/char/random.c | 134
-rw-r--r--  drivers/char/scx200_gpio.c | 1
-rw-r--r--  drivers/char/sonypi.c | 16
-rw-r--r--  drivers/char/tlclk.c | 37
-rw-r--r--  drivers/char/tpm/Kconfig | 33
-rw-r--r--  drivers/char/tpm/Makefile | 5
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c | 15
-rw-r--r--  drivers/char/tpm/eventlog/common.c | 48
-rw-r--r--  drivers/char/tpm/eventlog/of.c | 8
-rw-r--r--  drivers/char/tpm/eventlog/tpm1.c | 7
-rw-r--r--  drivers/char/tpm/st33zp24/i2c.c | 2
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c | 2
-rw-r--r--  drivers/char/tpm/tpm-buf.c | 52
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 57
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 14
-rw-r--r--  drivers/char/tpm/tpm-dev.c | 1
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 114
-rw-r--r--  drivers/char/tpm/tpm.h | 5
-rw-r--r--  drivers/char/tpm/tpm1-cmd.c | 5
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 224
-rw-r--r--  drivers/char/tpm/tpm2-sessions.c | 754
-rw-r--r--  drivers/char/tpm/tpm2-space.c | 5
-rw-r--r--  drivers/char/tpm/tpm_atmel.c | 66
-rw-r--r--  drivers/char/tpm/tpm_atmel.h | 140
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 136
-rw-r--r--  drivers/char/tpm/tpm_crb_ffa.c | 414
-rw-r--r--  drivers/char/tpm/tpm_crb_ffa.h | 23
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c | 90
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.h | 5
-rw-r--r--  drivers/char/tpm/tpm_i2c_atmel.c | 5
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 3
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c | 3
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 17
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 3
-rw-r--r--  drivers/char/tpm/tpm_loongson.c | 84
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 3
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 137
-rw-r--r--  drivers/char/tpm/tpm_svsm.c | 121
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 33
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 5
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c | 152
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_main.c | 4
-rw-r--r--  drivers/char/tpm/tpm_tis_synquacer.c | 2
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 5
-rw-r--r--  drivers/char/tpm/tpmrm-dev.c | 1
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 3
-rw-r--r--  drivers/char/ttyprintk.c | 1
-rw-r--r--  drivers/char/virtio_console.c | 88
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 2
-rw-r--r--  drivers/char/xillybus/xillybus_core.c | 5
-rw-r--r--  drivers/char/xillybus/xillybus_of.c | 2
-rw-r--r--  drivers/char/xillybus/xillyusb.c | 44
-rw-r--r--  drivers/clk/.kunitconfig | 3
-rw-r--r--  drivers/clk/Kconfig | 64
-rw-r--r--  drivers/clk/Makefile | 36
-rw-r--r--  drivers/clk/actions/owl-common.c | 1
-rw-r--r--  drivers/clk/actions/owl-common.h | 2
-rw-r--r--  drivers/clk/actions/owl-composite.c | 8
-rw-r--r--  drivers/clk/actions/owl-composite.h | 2
-rw-r--r--  drivers/clk/actions/owl-divider.c | 13
-rw-r--r--  drivers/clk/actions/owl-divider.h | 2
-rw-r--r--  drivers/clk/actions/owl-factor.c | 12
-rw-r--r--  drivers/clk/actions/owl-factor.h | 2
-rw-r--r--  drivers/clk/actions/owl-gate.h | 2
-rw-r--r--  drivers/clk/actions/owl-mux.h | 2
-rw-r--r--  drivers/clk/actions/owl-pll.c | 25
-rw-r--r--  drivers/clk/actions/owl-pll.h | 2
-rw-r--r--  drivers/clk/analogbits/wrpll-cln28hpc.c | 2
-rw-r--r--  drivers/clk/at91/Makefile | 2
-rw-r--r--  drivers/clk/at91/clk-audio-pll.c | 42
-rw-r--r--  drivers/clk/at91/clk-h32mx.c | 33
-rw-r--r--  drivers/clk/at91/clk-master.c | 5
-rw-r--r--  drivers/clk/at91/clk-peripheral.c | 49
-rw-r--r--  drivers/clk/at91/clk-pll.c | 12
-rw-r--r--  drivers/clk/at91/clk-plldiv.c | 34
-rw-r--r--  drivers/clk/at91/clk-sam9x60-pll.c | 153
-rw-r--r--  drivers/clk/at91/clk-usb.c | 20
-rw-r--r--  drivers/clk/at91/dt-compat.c | 5
-rw-r--r--  drivers/clk/at91/pmc.c | 13
-rw-r--r--  drivers/clk/at91/pmc.h | 22
-rw-r--r--  drivers/clk/at91/sam9x60.c | 9
-rw-r--r--  drivers/clk/at91/sam9x7.c | 952
-rw-r--r--  drivers/clk/at91/sama7d65.c | 1379
-rw-r--r--  drivers/clk/at91/sama7g5.c | 49
-rw-r--r--  drivers/clk/at91/sckc.c | 24
-rw-r--r--  drivers/clk/axs10x/i2s_pll_clock.c | 16
-rw-r--r--  drivers/clk/axs10x/pll_clock.c | 12
-rw-r--r--  drivers/clk/baikal-t1/ccu-div.c | 27
-rw-r--r--  drivers/clk/baikal-t1/ccu-pll.c | 14
-rw-r--r--  drivers/clk/baikal-t1/clk-ccu-div.c | 2
-rw-r--r--  drivers/clk/baikal-t1/clk-ccu-pll.c | 2
-rw-r--r--  drivers/clk/bcm/clk-bcm2711-dvp.c | 2
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 19
-rw-r--r--  drivers/clk/bcm/clk-bcm53573-ilp.c | 4
-rw-r--r--  drivers/clk/bcm/clk-bcm63xx-gate.c | 2
-rw-r--r--  drivers/clk/bcm/clk-iproc-asiu.c | 25
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 21
-rw-r--r--  drivers/clk/bcm/clk-kona.h | 2
-rw-r--r--  drivers/clk/bcm/clk-raspberrypi.c | 108
-rw-r--r--  drivers/clk/berlin/berlin2-avpll.c | 2
-rw-r--r--  drivers/clk/clk-apple-nco.c | 17
-rw-r--r--  drivers/clk/clk-asm9260.c | 4
-rw-r--r--  drivers/clk/clk-ast2600.c | 2
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 181
-rw-r--r--  drivers/clk/clk-axm5516.c | 1
-rw-r--r--  drivers/clk/clk-bm1880.c | 21
-rw-r--r--  drivers/clk/clk-cdce706.c | 18
-rw-r--r--  drivers/clk/clk-cdce925.c | 52
-rw-r--r--  drivers/clk/clk-clps711x.c | 2
-rw-r--r--  drivers/clk/clk-conf.c | 45
-rw-r--r--  drivers/clk/clk-cs2000-cp.c | 14
-rw-r--r--  drivers/clk/clk-devres.c | 37
-rw-r--r--  drivers/clk/clk-divider.c | 39
-rw-r--r--  drivers/clk/clk-en7523.c | 549
-rw-r--r--  drivers/clk/clk-ep93xx.c | 853
-rw-r--r--  drivers/clk/clk-eyeq.c | 859
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 29
-rw-r--r--  drivers/clk/clk-fixed-mmio.c | 2
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 2
-rw-r--r--  drivers/clk/clk-fixed-rate_test.c | 380
-rw-r--r--  drivers/clk/clk-fixed-rate_test.h | 8
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 25
-rw-r--r--  drivers/clk/clk-fractional-divider_test.c | 3
-rw-r--r--  drivers/clk/clk-gate.c | 2
-rw-r--r--  drivers/clk/clk-gate_test.c | 3
-rw-r--r--  drivers/clk/clk-gemini.c | 15
-rw-r--r--  drivers/clk/clk-gpio.c | 205
-rw-r--r--  drivers/clk/clk-highbank.c | 26
-rw-r--r--  drivers/clk/clk-hsdk-pll.c | 14
-rw-r--r--  drivers/clk/clk-lan966x.c | 80
-rw-r--r--  drivers/clk/clk-lmk04832.c | 100
-rw-r--r--  drivers/clk/clk-loongson1.c | 12
-rw-r--r--  drivers/clk/clk-loongson2.c | 133
-rw-r--r--  drivers/clk/clk-max9485.c | 27
-rw-r--r--  drivers/clk/clk-milbeaut.c | 22
-rw-r--r--  drivers/clk/clk-multiplier.c | 12
-rw-r--r--  drivers/clk/clk-nomadik.c | 5
-rw-r--r--  drivers/clk/clk-npcm8xx.c | 430
-rw-r--r--  drivers/clk/clk-palmas.c | 2
-rw-r--r--  drivers/clk/clk-pwm.c | 51
-rw-r--r--  drivers/clk/clk-qoriq.c | 6
-rw-r--r--  drivers/clk/clk-rp1.c | 2462
-rw-r--r--  drivers/clk/clk-rpmi.c | 620
-rw-r--r--  drivers/clk/clk-s2mps11.c | 15
-rw-r--r--  drivers/clk/clk-scmi.c | 82
-rw-r--r--  drivers/clk/clk-scpi.c | 20
-rw-r--r--  drivers/clk/clk-si514.c | 26
-rw-r--r--  drivers/clk/clk-si521xx.c | 14
-rw-r--r--  drivers/clk/clk-si5341.c | 24
-rw-r--r--  drivers/clk/clk-si5351.c | 49
-rw-r--r--  drivers/clk/clk-si544.c | 12
-rw-r--r--  drivers/clk/clk-si570.c | 28
-rw-r--r--  drivers/clk/clk-sp7021.c | 46
-rw-r--r--  drivers/clk/clk-sparx5.c | 10
-rw-r--r--  drivers/clk/clk-stm32f4.c | 183
-rw-r--r--  drivers/clk/clk-tps68470.c | 12
-rw-r--r--  drivers/clk/clk-twl.c | 69
-rw-r--r--  drivers/clk/clk-versaclock3.c | 129
-rw-r--r--  drivers/clk/clk-versaclock5.c | 73
-rw-r--r--  drivers/clk/clk-versaclock7.c | 32
-rw-r--r--  drivers/clk/clk-vt8500.c | 59
-rw-r--r--  drivers/clk/clk-wm831x.c | 14
-rw-r--r--  drivers/clk/clk-xgene.c | 45
-rw-r--r--  drivers/clk/clk.c | 115
-rw-r--r--  drivers/clk/clk_kunit_helpers.c | 237
-rw-r--r--  drivers/clk/clk_parent_data_test.h | 10
-rw-r--r--  drivers/clk/clk_test.c | 988
-rw-r--r--  drivers/clk/clkdev.c | 20
-rw-r--r--  drivers/clk/davinci/Makefile | 2
-rw-r--r--  drivers/clk/davinci/da8xx-cfgchip.c | 11
-rw-r--r--  drivers/clk/davinci/pll-da830.c | 71
-rw-r--r--  drivers/clk/davinci/pll.c | 67
-rw-r--r--  drivers/clk/davinci/pll.h | 2
-rw-r--r--  drivers/clk/davinci/psc-da830.c | 118
-rw-r--r--  drivers/clk/davinci/psc-da850.c | 7
-rw-r--r--  drivers/clk/davinci/psc.c | 13
-rw-r--r--  drivers/clk/davinci/psc.h | 7
-rw-r--r--  drivers/clk/hisilicon/clk-hi3519.c | 2
-rw-r--r--  drivers/clk/hisilicon/clk-hi3559a.c | 9
-rw-r--r--  drivers/clk/hisilicon/clk-hi3660-stub.c | 18
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220-stub.c | 12
-rw-r--r--  drivers/clk/hisilicon/clkdivider-hi6220.c | 12
-rw-r--r--  drivers/clk/hisilicon/clkgate-separated.c | 16
-rw-r--r--  drivers/clk/hisilicon/crg-hi3516cv300.c | 2
-rw-r--r--  drivers/clk/hisilicon/crg-hi3798cv200.c | 2
-rw-r--r--  drivers/clk/imgtec/clk-boston.c | 6
-rw-r--r--  drivers/clk/imx/Kconfig | 2
-rw-r--r--  drivers/clk/imx/Makefile | 1
-rw-r--r--  drivers/clk/imx/clk-busy.c | 8
-rw-r--r--  drivers/clk/imx/clk-composite-7ulp.c | 20
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c | 69
-rw-r--r--  drivers/clk/imx/clk-composite-93.c | 22
-rw-r--r--  drivers/clk/imx/clk-cpu.c | 10
-rw-r--r--  drivers/clk/imx/clk-fixup-div.c | 10
-rw-r--r--  drivers/clk/imx/clk-fixup-mux.c | 2
-rw-r--r--  drivers/clk/imx/clk-frac-pll.c | 20
-rw-r--r--  drivers/clk/imx/clk-fracn-gppll.c | 33
-rw-r--r--  drivers/clk/imx/clk-gate-exclusive.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx5.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 4
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx8-acm.c | 46
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx8mp-audiomix.c | 64
-rw-r--r--  drivers/clk/imx/clk-imx8mp.c | 164
-rw-r--r--  drivers/clk/imx/clk-imx8qxp-lpcg.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx8qxp.c | 51
-rw-r--r--  drivers/clk/imx/clk-imx8ulp-sim-lpav.c | 156
-rw-r--r--  drivers/clk/imx/clk-imx93.c | 89
-rw-r--r--  drivers/clk/imx/clk-imx95-blk-ctl.c | 200
-rw-r--r--  drivers/clk/imx/clk-imxrt1050.c | 1
-rw-r--r--  drivers/clk/imx/clk-lpcg-scu.c | 41
-rw-r--r--  drivers/clk/imx/clk-pfd.c | 18
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 31
-rw-r--r--  drivers/clk/imx/clk-pllv2.c | 23
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 72
-rw-r--r--  drivers/clk/imx/clk-pllv4.c | 29
-rw-r--r--  drivers/clk/imx/clk-scu.c | 41
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 12
-rw-r--r--  drivers/clk/imx/clk.c | 1
-rw-r--r--  drivers/clk/imx/clk.h | 4
-rw-r--r--  drivers/clk/ingenic/cgu.c | 12
-rw-r--r--  drivers/clk/ingenic/cgu.h | 2
-rw-r--r--  drivers/clk/ingenic/jz4725b-cgu.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4740-cgu.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4755-cgu.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4760-cgu.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4770-cgu.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4780-cgu.c | 26
-rw-r--r--  drivers/clk/ingenic/pm.c | 14
-rw-r--r--  drivers/clk/ingenic/pm.h | 2
-rw-r--r--  drivers/clk/ingenic/tcu.c | 12
-rw-r--r--  drivers/clk/ingenic/x1000-cgu.c | 21
-rw-r--r--  drivers/clk/ingenic/x1830-cgu.c | 2
-rw-r--r--  drivers/clk/keystone/sci-clk.c | 11
-rw-r--r--  drivers/clk/keystone/syscon-clk.c | 13
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates.h | 8
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_multiple.dtso | 16
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso | 20
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_null.dtso | 14
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso | 18
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_one.dtso | 14
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso | 18
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso | 16
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso | 20
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_u64_one.dtso | 14
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso | 18
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_without.dtso | 13
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso | 17
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_zero.dtso | 12
-rw-r--r--  drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso | 16
-rw-r--r--  drivers/clk/kunit_clk_fixed_rate_test.dtso | 19
-rw-r--r--  drivers/clk/kunit_clk_hw_get_dev_of_node.dtso | 10
-rw-r--r--  drivers/clk/kunit_clk_parent_data_test.dtso | 28
-rw-r--r--  drivers/clk/mediatek/Kconfig | 123
-rw-r--r--  drivers/clk/mediatek/Makefile | 18
-rw-r--r--  drivers/clk/mediatek/clk-gate.c | 117
-rw-r--r--  drivers/clk/mediatek/clk-gate.h | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-aud.c | 14
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-bdp.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-eth.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-g3d.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-hif.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-img.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-mm.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-vdec.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt2701.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-apmixedsys.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-bdp.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-jpgdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-mfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-apmixedsys.c | 137
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-imgsys.c | 57
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-infracfg.c | 107
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-mfgcfg.c | 61
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-pericfg.c | 124
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-topckgen.c | 394
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-vdecsys.c | 79
-rw-r--r--  drivers/clk/mediatek/clk-mt6735-vencsys.c | 53
-rw-r--r--  drivers/clk/mediatek/clk-mt6765-audio.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6765-cam.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6765-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6765-mipi0a.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6765-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6765-vcodec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6765.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-aud.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-cam.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-ipe.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-mfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-infracfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-mfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-pericfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-topckgen.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-vdecsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-vencsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6797-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6797-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6797-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6797-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6797.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-aud.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-eth.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-hif.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-infracfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7622.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7629-eth.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7629-hif.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7629.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7981-apmixed.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7981-eth.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7981-infracfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7981-topckgen.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-apmixed.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-eth.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-infracfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-topckgen.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-apmixed.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-eth.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-infracfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-topckgen.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-xfipll.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8135-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8135.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-aud.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-mfgcfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8167.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-img.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-infracfg.c | 14
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-mm.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-pericfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-topckgen.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-vdecsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-vencsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-audio.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-cam.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-ipu0.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-ipu1.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-ipu_adl.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-ipu_conn.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-mfgcfg.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-apmixedsys.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-cam.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-infra_ao.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-ipe.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-mcu.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-mdp.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-mfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-topckgen.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-wpe.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-apmixedsys.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-cam.c | 22
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-ccu.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-img.c | 23
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-infra_ao.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-ipe.c | 18
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-mfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-peri_ao.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-topckgen.c | 13
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-vdo0.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-vdo1.c | 15
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-venc.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-vpp0.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-vpp1.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-wpe.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-apmixedsys.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-aud.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-cam.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-ipe.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mdp.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-msdc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-scp_adsp.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8192.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-apmixedsys.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-apusys_pll.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-cam.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-ccu.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-img.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-infra_ao.c | 6
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-ipe.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-mfg.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-peri_ao.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-scp_adsp.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-topckgen.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vdec.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vdo0.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vdo1.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-venc.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vpp0.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vpp1.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-wpe.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8196-apmixedsys.c | 204
-rw-r--r--  drivers/clk/mediatek/clk-mt8196-disp0.c | 170
-rw-r--r--  drivers/clk/mediatek/clk-mt8196-disp1.c | 170
-rw-r--r--  drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c | 118
-rw-r--r--  drivers/clk/mediatek/clk-mt8196-mcu.c | 167
-rw-r--r--drivers/clk/mediatek/clk-mt8196-mdpsys.c186
-rw-r--r--drivers/clk/mediatek/clk-mt8196-mfg.c150
-rw-r--r--drivers/clk/mediatek/clk-mt8196-ovl0.c154
-rw-r--r--drivers/clk/mediatek/clk-mt8196-ovl1.c154
-rw-r--r--drivers/clk/mediatek/clk-mt8196-peri_ao.c142
-rw-r--r--drivers/clk/mediatek/clk-mt8196-pextp.c131
-rw-r--r--drivers/clk/mediatek/clk-mt8196-topckgen.c985
-rw-r--r--drivers/clk/mediatek/clk-mt8196-topckgen2.c568
-rw-r--r--drivers/clk/mediatek/clk-mt8196-ufs_ao.c108
-rw-r--r--drivers/clk/mediatek/clk-mt8196-vdec.c253
-rw-r--r--drivers/clk/mediatek/clk-mt8196-vdisp_ao.c80
-rw-r--r--drivers/clk/mediatek/clk-mt8196-venc.c236
-rw-r--r--drivers/clk/mediatek/clk-mt8196-vlpckgen.c725
-rw-r--r--drivers/clk/mediatek/clk-mt8365-apmixedsys.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8365-apu.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8365.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8516-aud.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8516.c2
-rw-r--r--drivers/clk/mediatek/clk-mtk.c40
-rw-r--r--drivers/clk/mediatek/clk-mtk.h24
-rw-r--r--drivers/clk/mediatek/clk-mux.c122
-rw-r--r--drivers/clk/mediatek/clk-mux.h87
-rw-r--r--drivers/clk/mediatek/clk-pll.c58
-rw-r--r--drivers/clk/mediatek/clk-pll.h11
-rw-r--r--drivers/clk/mediatek/clk-pllfh.c2
-rw-r--r--drivers/clk/mediatek/reset.c61
-rw-r--r--drivers/clk/mediatek/reset.h10
-rw-r--r--drivers/clk/meson/Kconfig55
-rw-r--r--drivers/clk/meson/Makefile3
-rw-r--r--drivers/clk/meson/a1-peripherals.c1189
-rw-r--r--drivers/clk/meson/a1-peripherals.h46
-rw-r--r--drivers/clk/meson/a1-pll.c154
-rw-r--r--drivers/clk/meson/a1-pll.h28
-rw-r--r--drivers/clk/meson/axg-aoclk.c177
-rw-r--r--drivers/clk/meson/axg-audio.c628
-rw-r--r--drivers/clk/meson/axg-audio.h68
-rw-r--r--drivers/clk/meson/axg.c463
-rw-r--r--drivers/clk/meson/axg.h105
-rw-r--r--drivers/clk/meson/c3-peripherals.c1128
-rw-r--r--drivers/clk/meson/c3-pll.c684
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.c4
-rw-r--r--drivers/clk/meson/clk-dualdiv.c7
-rw-r--r--drivers/clk/meson/clk-mpll.c22
-rw-r--r--drivers/clk/meson/clk-mpll.h1
-rw-r--r--drivers/clk/meson/clk-phase.c19
-rw-r--r--drivers/clk/meson/clk-pll.c62
-rw-r--r--drivers/clk/meson/clk-pll.h2
-rw-r--r--drivers/clk/meson/clk-regmap.c62
-rw-r--r--drivers/clk/meson/clk-regmap.h24
-rw-r--r--drivers/clk/meson/g12a-aoclk.c274
-rw-r--r--drivers/clk/meson/g12a.c2415
-rw-r--r--drivers/clk/meson/g12a.h130
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c145
-rw-r--r--drivers/clk/meson/gxbb.c1125
-rw-r--r--drivers/clk/meson/gxbb.h115
-rw-r--r--drivers/clk/meson/meson-aoclk.c42
-rw-r--r--drivers/clk/meson/meson-aoclk.h4
-rw-r--r--drivers/clk/meson/meson-clkc-utils.c90
-rw-r--r--drivers/clk/meson/meson-clkc-utils.h89
-rw-r--r--drivers/clk/meson/meson-eeclk.c61
-rw-r--r--drivers/clk/meson/meson-eeclk.h26
-rw-r--r--drivers/clk/meson/meson8-ddr.c71
-rw-r--r--drivers/clk/meson/meson8b.c1021
-rw-r--r--drivers/clk/meson/meson8b.h80
-rw-r--r--drivers/clk/meson/s4-peripherals.c1554
-rw-r--r--drivers/clk/meson/s4-peripherals.h56
-rw-r--r--drivers/clk/meson/s4-pll.c160
-rw-r--r--drivers/clk/meson/s4-pll.h38
-rw-r--r--drivers/clk/meson/sclk-div.c8
-rw-r--r--drivers/clk/meson/vclk.c7
-rw-r--r--drivers/clk/meson/vid-pll-div.c4
-rw-r--r--drivers/clk/microchip/Kconfig2
-rw-r--r--drivers/clk/microchip/clk-core.c57
-rw-r--r--drivers/clk/microchip/clk-mpfs.c229
-rw-r--r--drivers/clk/mmp/Kconfig10
-rw-r--r--drivers/clk/mmp/Makefile5
-rw-r--r--drivers/clk/mmp/clk-audio.c20
-rw-r--r--drivers/clk/mmp/clk-frac.c82
-rw-r--r--drivers/clk/mmp/clk-gate.c2
-rw-r--r--drivers/clk/mmp/clk-mix.c10
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c26
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c4
-rw-r--r--drivers/clk/mmp/clk-of-pxa1928.c6
-rw-r--r--drivers/clk/mmp/clk-of-pxa910.c4
-rw-r--r--drivers/clk/mmp/clk-pxa1908-apbc.c130
-rw-r--r--drivers/clk/mmp/clk-pxa1908-apbcp.c82
-rw-r--r--drivers/clk/mmp/clk-pxa1908-apmu.c128
-rw-r--r--drivers/clk/mmp/clk-pxa1908-mpmu.c112
-rw-r--r--drivers/clk/mmp/clk.h10
-rw-r--r--drivers/clk/mmp/pwr-island.c2
-rw-r--r--drivers/clk/mstar/clk-msc313-cpupll.c18
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c12
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c17
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c2
-rw-r--r--drivers/clk/mvebu/armada-37xx-xtal.c2
-rw-r--r--drivers/clk/mvebu/armada-xp.c5
-rw-r--r--drivers/clk/mvebu/clk-corediv.c18
-rw-r--r--drivers/clk/mvebu/clk-cpu.c12
-rw-r--r--drivers/clk/mvebu/common.c12
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c20
-rw-r--r--drivers/clk/mvebu/dove-divider.c16
-rw-r--r--drivers/clk/mxs/clk-div.c10
-rw-r--r--drivers/clk/mxs/clk-frac.c16
-rw-r--r--drivers/clk/mxs/clk-ref.c30
-rw-r--r--drivers/clk/nuvoton/Kconfig4
-rw-r--r--drivers/clk/nuvoton/clk-ma35d1-divider.c12
-rw-r--r--drivers/clk/nuvoton/clk-ma35d1-pll.c28
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c2
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c20
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c62
-rw-r--r--drivers/clk/pistachio/clk-pll.c20
-rw-r--r--drivers/clk/qcom/Kconfig429
-rw-r--r--drivers/clk/qcom/Makefile43
-rw-r--r--drivers/clk/qcom/a53-pll.c2
-rw-r--r--drivers/clk/qcom/a7-pll.c3
-rw-r--r--drivers/clk/qcom/apcs-msm8916.c2
-rw-r--r--drivers/clk/qcom/apcs-sdx55.c8
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c6
-rw-r--r--drivers/clk/qcom/apss-ipq5424.c258
-rw-r--r--drivers/clk/qcom/apss-ipq6018.c2
-rw-r--r--drivers/clk/qcom/camcc-milos.c2161
-rw-r--r--drivers/clk/qcom/camcc-qcs615.c1597
-rw-r--r--drivers/clk/qcom/camcc-sa8775p.c1960
-rw-r--r--drivers/clk/qcom/camcc-sc7180.c4
-rw-r--r--drivers/clk/qcom/camcc-sc7280.c28
-rw-r--r--drivers/clk/qcom/camcc-sc8180x.c2889
-rw-r--r--drivers/clk/qcom/camcc-sc8280xp.c8
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c6
-rw-r--r--drivers/clk/qcom/camcc-sm4450.c1687
-rw-r--r--drivers/clk/qcom/camcc-sm6350.c36
-rw-r--r--drivers/clk/qcom/camcc-sm7150.c2059
-rw-r--r--drivers/clk/qcom/camcc-sm8150.c2159
-rw-r--r--drivers/clk/qcom/camcc-sm8250.c67
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c350
-rw-r--r--drivers/clk/qcom/camcc-sm8550.c97
-rw-r--r--drivers/clk/qcom/camcc-sm8650.c3592
-rw-r--r--drivers/clk/qcom/camcc-x1e80100.c74
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c818
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h34
-rw-r--r--drivers/clk/qcom/clk-branch.c19
-rw-r--r--drivers/clk/qcom/clk-branch.h5
-rw-r--r--drivers/clk/qcom/clk-cbf-8996.c10
-rw-r--r--drivers/clk/qcom/clk-cpu-8996.c1
-rw-r--r--drivers/clk/qcom/clk-rcg.c3
-rw-r--r--drivers/clk/qcom/clk-rcg.h3
-rw-r--r--drivers/clk/qcom/clk-rcg2.c316
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.c27
-rw-r--r--drivers/clk/qcom/clk-rpm.c37
-rw-r--r--drivers/clk/qcom/clk-rpmh.c139
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c129
-rw-r--r--drivers/clk/qcom/clk-spmi-pmic-div.c25
-rw-r--r--drivers/clk/qcom/common.c142
-rw-r--r--drivers/clk/qcom/common.h23
-rw-r--r--drivers/clk/qcom/dispcc-glymur.c1982
-rw-r--r--drivers/clk/qcom/dispcc-milos.c974
-rw-r--r--drivers/clk/qcom/dispcc-qcm2290.c7
-rw-r--r--drivers/clk/qcom/dispcc-qcs615.c792
-rw-r--r--drivers/clk/qcom/dispcc-sc7180.c3
-rw-r--r--drivers/clk/qcom/dispcc-sc7280.c11
-rw-r--r--drivers/clk/qcom/dispcc-sc8280xp.c9
-rw-r--r--drivers/clk/qcom/dispcc-sdm845.c4
-rw-r--r--drivers/clk/qcom/dispcc-sm4450.c769
-rw-r--r--drivers/clk/qcom/dispcc-sm6115.c7
-rw-r--r--drivers/clk/qcom/dispcc-sm6125.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c22
-rw-r--r--drivers/clk/qcom/dispcc-sm6375.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm7150.c1012
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c16
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c75
-rw-r--r--drivers/clk/qcom/dispcc-sm8550.c221
-rw-r--r--drivers/clk/qcom/dispcc-sm8650.c1796
-rw-r--r--drivers/clk/qcom/dispcc-sm8750.c1961
-rw-r--r--drivers/clk/qcom/dispcc-x1e80100.c5
-rw-r--r--drivers/clk/qcom/dispcc0-sa8775p.c1480
-rw-r--r--drivers/clk/qcom/dispcc1-sa8775p.c1480
-rw-r--r--drivers/clk/qcom/ecpricc-qdu1000.c32
-rw-r--r--drivers/clk/qcom/gcc-glymur.c8615
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c14
-rw-r--r--drivers/clk/qcom/gcc-ipq5018.c6
-rw-r--r--drivers/clk/qcom/gcc-ipq5332.c416
-rw-r--r--drivers/clk/qcom/gcc-ipq5424.c3342
-rw-r--r--drivers/clk/qcom/gcc-ipq6018.c70
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c4
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c12
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c472
-rw-r--r--drivers/clk/qcom/gcc-mdm9607.c4
-rw-r--r--drivers/clk/qcom/gcc-mdm9615.c6
-rw-r--r--drivers/clk/qcom/gcc-milos.c3225
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8917.c619
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c6
-rw-r--r--drivers/clk/qcom/gcc-msm8953.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c9
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8976.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c54
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c68
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c3
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c5
-rw-r--r--drivers/clk/qcom/gcc-qcs615.c3034
-rw-r--r--drivers/clk/qcom/gcc-qcs8300.c3640
-rw-r--r--drivers/clk/qcom/gcc-qdu1000.c2
-rw-r--r--drivers/clk/qcom/gcc-sa8775p.c156
-rw-r--r--drivers/clk/qcom/gcc-sar2130p.c2366
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c2
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c15
-rw-r--r--drivers/clk/qcom/gcc-sc8180x.c446
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c55
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c76
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c45
-rw-r--r--drivers/clk/qcom/gcc-sdx55.c2
-rw-r--r--drivers/clk/qcom/gcc-sdx65.c2
-rw-r--r--drivers/clk/qcom/gcc-sdx75.c2
-rw-r--r--drivers/clk/qcom/gcc-sm4450.c2
-rw-r--r--drivers/clk/qcom/gcc-sm6115.c8
-rw-r--r--drivers/clk/qcom/gcc-sm6125.c2
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c40
-rw-r--r--drivers/clk/qcom/gcc-sm6375.c6
-rw-r--r--drivers/clk/qcom/gcc-sm7150.c394
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c8
-rw-r--r--drivers/clk/qcom/gcc-sm8250.c8
-rw-r--r--drivers/clk/qcom/gcc-sm8350.c2
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c187
-rw-r--r--drivers/clk/qcom/gcc-sm8550.c64
-rw-r--r--drivers/clk/qcom/gcc-sm8650.c72
-rw-r--r--drivers/clk/qcom/gcc-sm8750.c3276
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c849
-rw-r--r--drivers/clk/qcom/gdsc.c139
-rw-r--r--drivers/clk/qcom/gdsc.h2
-rw-r--r--drivers/clk/qcom/gpucc-milos.c562
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c7
-rw-r--r--drivers/clk/qcom/gpucc-qcm2290.c423
-rw-r--r--drivers/clk/qcom/gpucc-qcs615.c531
-rw-r--r--drivers/clk/qcom/gpucc-sa8775p.c92
-rw-r--r--drivers/clk/qcom/gpucc-sar2130p.c503
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c5
-rw-r--r--drivers/clk/qcom/gpucc-sc7280.c10
-rw-r--r--drivers/clk/qcom/gpucc-sc8280xp.c5
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c9
-rw-r--r--drivers/clk/qcom/gpucc-sdm845.c3
-rw-r--r--drivers/clk/qcom/gpucc-sm4450.c804
-rw-r--r--drivers/clk/qcom/gpucc-sm6115.c6
-rw-r--r--drivers/clk/qcom/gpucc-sm6125.c4
-rw-r--r--drivers/clk/qcom/gpucc-sm6350.c13
-rw-r--r--drivers/clk/qcom/gpucc-sm6375.c4
-rw-r--r--drivers/clk/qcom/gpucc-sm8150.c5
-rw-r--r--drivers/clk/qcom/gpucc-sm8250.c7
-rw-r--r--drivers/clk/qcom/gpucc-sm8350.c11
-rw-r--r--drivers/clk/qcom/gpucc-sm8450.c54
-rw-r--r--drivers/clk/qcom/gpucc-sm8550.c2
-rw-r--r--drivers/clk/qcom/gpucc-sm8650.c4
-rw-r--r--drivers/clk/qcom/gpucc-x1e80100.c2
-rw-r--r--drivers/clk/qcom/gpucc-x1p42100.c587
-rw-r--r--drivers/clk/qcom/hfpll.c1
-rw-r--r--drivers/clk/qcom/ipq-cmn-pll.c468
-rw-r--r--drivers/clk/qcom/kpss-xcc.c5
-rw-r--r--drivers/clk/qcom/krait-cc.c1
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c10
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c10
-rw-r--r--drivers/clk/qcom/lpass-gfm-sm8250.c1
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c31
-rw-r--r--drivers/clk/qcom/lpasscc-sc8280xp.c8
-rw-r--r--drivers/clk/qcom/lpasscc-sdm845.c1
-rw-r--r--drivers/clk/qcom/lpasscc-sm6115.c85
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c9
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7280.c3
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c52
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c97
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c56
-rw-r--r--drivers/clk/qcom/mmcc-msm8994.c17
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c20
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c5
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c17
-rw-r--r--drivers/clk/qcom/nsscc-ipq5424.c1340
-rw-r--r--drivers/clk/qcom/nsscc-ipq9574.c3110
-rw-r--r--drivers/clk/qcom/nsscc-qca8k.c2221
-rw-r--r--drivers/clk/qcom/tcsrcc-glymur.c313
-rw-r--r--drivers/clk/qcom/tcsrcc-sm8550.c20
-rw-r--r--drivers/clk/qcom/tcsrcc-sm8650.c8
-rw-r--r--drivers/clk/qcom/tcsrcc-sm8750.c141
-rw-r--r--drivers/clk/qcom/tcsrcc-x1e80100.c4
-rw-r--r--drivers/clk/qcom/videocc-milos.c403
-rw-r--r--drivers/clk/qcom/videocc-qcs615.c338
-rw-r--r--drivers/clk/qcom/videocc-sa8775p.c584
-rw-r--r--drivers/clk/qcom/videocc-sc7180.c4
-rw-r--r--drivers/clk/qcom/videocc-sc7280.c11
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c7
-rw-r--r--drivers/clk/qcom/videocc-sm6350.c355
-rw-r--r--drivers/clk/qcom/videocc-sm7150.c357
-rw-r--r--drivers/clk/qcom/videocc-sm8150.c8
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c8
-rw-r--r--drivers/clk/qcom/videocc-sm8350.c8
-rw-r--r--drivers/clk/qcom/videocc-sm8450.c96
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c245
-rw-r--r--drivers/clk/qcom/videocc-sm8750.c463
-rw-r--r--drivers/clk/ralink/clk-mtmips.c55
-rw-r--r--drivers/clk/renesas/Kconfig38
-rw-r--r--drivers/clk/renesas/Makefile7
-rw-r--r--drivers/clk/renesas/clk-div6.c6
-rw-r--r--drivers/clk/renesas/clk-mstp.c22
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c1
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c1
-rw-r--r--drivers/clk/renesas/clk-vbattb.c205
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c17
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c13
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c53
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c30
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c44
-rw-r--r--drivers/clk/renesas/r8a779h0-cpg-mssr.c86
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c35
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c153
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c221
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c222
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c117
-rw-r--r--drivers/clk/renesas/r9a09g047-cpg.c532
-rw-r--r--drivers/clk/renesas/r9a09g056-cpg.c476
-rw-r--r--drivers/clk/renesas/r9a09g057-cpg.c567
-rw-r--r--drivers/clk/renesas/r9a09g077-cpg.c320
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.c5
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c9
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c24
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c236
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h39
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c2
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c369
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h32
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c751
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h83
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.c1693
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.h368
-rw-r--r--drivers/clk/rockchip/Kconfig35
-rw-r--r--drivers/clk/rockchip/Makefile7
-rw-r--r--drivers/clk/rockchip/clk-cpu.c176
-rw-r--r--drivers/clk/rockchip/clk-ddr.c13
-rw-r--r--drivers/clk/rockchip/clk-gate-grf.c105
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c12
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c28
-rw-r--r--drivers/clk/rockchip/clk-pll.c60
-rw-r--r--drivers/clk/rockchip/clk-px30.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c16
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c24
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c22
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c7
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c19
-rw-r--r--drivers/clk/rockchip/clk-rk3308.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c13
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c7
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3506.c869
-rw-r--r--drivers/clk/rockchip/clk-rk3528.c1187
-rw-r--r--drivers/clk/rockchip/clk-rk3562.c1101
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3576.c1860
-rw-r--r--drivers/clk/rockchip/clk-rk3588.c135
-rw-r--r--drivers/clk/rockchip/clk-rv1126.c2
-rw-r--r--drivers/clk/rockchip/clk-rv1126b.c1117
-rw-r--r--drivers/clk/rockchip/clk.c169
-rw-r--r--drivers/clk/rockchip/clk.h330
-rw-r--r--drivers/clk/rockchip/gate-link.c85
-rw-r--r--drivers/clk/rockchip/rst-rk3506.c226
-rw-r--r--drivers/clk/rockchip/rst-rk3528.c306
-rw-r--r--drivers/clk/rockchip/rst-rk3562.c429
-rw-r--r--drivers/clk/rockchip/rst-rk3576.c651
-rw-r--r--drivers/clk/rockchip/rst-rv1126b.c443
-rw-r--r--drivers/clk/samsung/Kconfig10
-rw-r--r--drivers/clk/samsung/Makefile7
-rw-r--r--drivers/clk/samsung/clk-acpm.c185
-rw-r--r--drivers/clk/samsung/clk-artpec8.c1044
-rw-r--r--drivers/clk/samsung/clk-cpu.c20
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c5
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c12
-rw-r--r--drivers/clk/samsung/clk-exynos2200.c3928
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c6
-rw-r--r--drivers/clk/samsung/clk-exynos4.c91
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c3
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c7
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c7
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c4
-rw-r--r--drivers/clk/samsung/clk-exynos7.c3
-rw-r--r--drivers/clk/samsung/clk-exynos7870.c1829
-rw-r--r--drivers/clk/samsung/clk-exynos7885.c95
-rw-r--r--drivers/clk/samsung/clk-exynos850.c11
-rw-r--r--drivers/clk/samsung/clk-exynos8895.c2803
-rw-r--r--drivers/clk/samsung/clk-exynos990.c2717
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c85
-rw-r--r--drivers/clk/samsung/clk-exynosautov920.c1961
-rw-r--r--drivers/clk/samsung/clk-fsd.c51
-rw-r--r--drivers/clk/samsung/clk-gs101.c26
-rw-r--r--drivers/clk/samsung/clk-pll.c288
-rw-r--r--drivers/clk/samsung/clk-pll.h10
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c3
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c15
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c1
-rw-r--r--drivers/clk/samsung/clk.c18
-rw-r--r--drivers/clk/samsung/clk.h1
-rw-r--r--drivers/clk/sifive/fu540-prci.h2
-rw-r--r--drivers/clk/sifive/fu740-prci.h2
-rw-r--r--drivers/clk/sifive/sifive-prci.c20
-rw-r--r--drivers/clk/sifive/sifive-prci.h4
-rw-r--r--drivers/clk/socfpga/Kconfig2
-rw-r--r--drivers/clk/socfpga/Makefile2
-rw-r--r--drivers/clk/socfpga/clk-agilex5.c561
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c53
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c41
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c2
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c42
-rw-r--r--drivers/clk/socfpga/clk-pll.c4
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h43
-rw-r--r--drivers/clk/sophgo/Kconfig47
-rw-r--r--drivers/clk/sophgo/Makefile6
-rw-r--r--drivers/clk/sophgo/clk-cv1800.c3
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-ip.c12
-rw-r--r--drivers/clk/sophgo/clk-sg2042-clkgen.c1153
-rw-r--r--drivers/clk/sophgo/clk-sg2042-pll.c559
-rw-r--r--drivers/clk/sophgo/clk-sg2042-rpgate.c291
-rw-r--r--drivers/clk/sophgo/clk-sg2042.h18
-rw-r--r--drivers/clk/sophgo/clk-sg2044-pll.c628
-rw-r--r--drivers/clk/sophgo/clk-sg2044.c1812
-rw-r--r--drivers/clk/spacemit/Kconfig19
-rw-r--r--drivers/clk/spacemit/Makefile5
-rw-r--r--drivers/clk/spacemit/ccu-k1.c1209
-rw-r--r--drivers/clk/spacemit/ccu_common.h48
-rw-r--r--drivers/clk/spacemit/ccu_ddn.c86
-rw-r--r--drivers/clk/spacemit/ccu_ddn.h50
-rw-r--r--drivers/clk/spacemit/ccu_mix.c270
-rw-r--r--drivers/clk/spacemit/ccu_mix.h223
-rw-r--r--drivers/clk/spacemit/ccu_pll.c159
-rw-r--r--drivers/clk/spacemit/ccu_pll.h86
-rw-r--r--drivers/clk/spear/clk-aux-synth.c12
-rw-r--r--drivers/clk/spear/clk-frac-synth.c12
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c12
-rw-r--r--drivers/clk/spear/clk-vco-pll.c23
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/sprd/common.c1
-rw-r--r--drivers/clk/sprd/div.c13
-rw-r--r--drivers/clk/sprd/gate.h2
-rw-r--r--drivers/clk/sprd/pll.c8
-rw-r--r--drivers/clk/sprd/sc9860-clk.c8
-rw-r--r--drivers/clk/sprd/ums512-clk.c4
-rw-r--r--drivers/clk/st/clk-flexgen.c80
-rw-r--r--drivers/clk/st/clkgen-fsyn.c33
-rw-r--r--drivers/clk/st/clkgen-pll.c38
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7100-audio.c14
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-aon.c14
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-isp.c16
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-pll.c2
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-stg.c14
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-sys.c45
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-vout.c18
-rw-r--r--drivers/clk/starfive/clk-starfive-jh71x0.c12
-rw-r--r--drivers/clk/starfive/clk-starfive-jh71x0.h6
-rw-r--r--drivers/clk/stm32/Kconfig15
-rw-r--r--drivers/clk/stm32/Makefile1
-rw-r--r--drivers/clk/stm32/clk-stm32-core.c30
-rw-r--r--drivers/clk/stm32/clk-stm32-core.h2
-rw-r--r--drivers/clk/stm32/clk-stm32mp1.c17
-rw-r--r--drivers/clk/stm32/clk-stm32mp13.c2
-rw-r--r--drivers/clk/stm32/clk-stm32mp21.c1586
-rw-r--r--drivers/clk/stm32/clk-stm32mp25.c516
-rw-r--r--drivers/clk/stm32/stm32mp21_rcc.h651
-rw-r--r--drivers/clk/sunxi-ng/Kconfig59
-rw-r--r--drivers/clk/sunxi-ng/Makefile6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.c51
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c18
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c9
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c143
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c469
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c249
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.c1701
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-rtc.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c36
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c9
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c9
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c43
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c29
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h7
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.h18
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c12
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c26
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c55
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h79
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c25
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c45
-rw-r--r--drivers/clk/sunxi-ng/ccu_phase.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c12
-rw-r--r--drivers/clk/sunxi/Kconfig10
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c4
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c4
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c11
-rw-r--r--drivers/clk/tegra/Kconfig2
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c10
-rw-r--r--drivers/clk/tegra/clk-bpmp.c4
-rw-r--r--drivers/clk/tegra/clk-dfll.c2
-rw-r--r--drivers/clk/tegra/clk-divider.c28
-rw-r--r--drivers/clk/tegra/clk-periph.c10
-rw-r--r--drivers/clk/tegra/clk-pll.c52
-rw-r--r--drivers/clk/tegra/clk-super.c9
-rw-r--r--drivers/clk/tegra/clk-tegra114.c30
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c160
-rw-r--r--drivers/clk/tegra/clk-tegra210-emc.c24
-rw-r--r--drivers/clk/tegra/clk-tegra210.c14
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1
-rw-r--r--drivers/clk/tegra/clk.h3
-rw-r--r--drivers/clk/thead/Kconfig13
-rw-r--r--drivers/clk/thead/Makefile2
-rw-r--r--drivers/clk/thead/clk-th1520-ap.c1308
-rw-r--r--drivers/clk/ti/adpll.c2
-rw-r--r--drivers/clk/ti/autoidle.c2
-rw-r--r--drivers/clk/ti/clk-33xx.c2
-rw-r--r--drivers/clk/ti/clk-43xx.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c13
-rw-r--r--drivers/clk/ti/clk.c32
-rw-r--r--drivers/clk/ti/clkt_dpll.c36
-rw-r--r--drivers/clk/ti/clock.h6
-rw-r--r--drivers/clk/ti/composite.c6
-rw-r--r--drivers/clk/ti/divider.c12
-rw-r--r--drivers/clk/ti/dpll.c10
-rw-r--r--drivers/clk/ti/dpll3xxx.c7
-rw-r--r--drivers/clk/ti/dpll44xx.c89
-rw-r--r--drivers/clk/ti/fapll.c48
-rw-r--r--drivers/clk/ti/mux.c4
-rw-r--r--drivers/clk/ux500/clk-prcmu.c14
-rw-r--r--drivers/clk/versatile/clk-icst.c74
-rw-r--r--drivers/clk/versatile/clk-sp810.c2
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c17
-rw-r--r--drivers/clk/visconti/clkc-tmpv770x.c79
-rw-r--r--drivers/clk/visconti/pll-tmpv770x.c5
-rw-r--r--drivers/clk/visconti/pll.c25
-rw-r--r--drivers/clk/x86/clk-cgu.c35
-rw-r--r--drivers/clk/x86/clk-fch.c2
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c2
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c479
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c52
-rw-r--r--drivers/clk/zynq/pll.c12
-rw-r--r--drivers/clk/zynqmp/divider.c23
-rw-r--r--drivers/clk/zynqmp/pll.c24
-rw-r--r--drivers/clocksource/Kconfig66
-rw-r--r--drivers/clocksource/Makefile8
-rw-r--r--drivers/clocksource/acpi_pm.c32
-rw-r--r--drivers/clocksource/arm_arch_timer.c699
-rw-r--r--drivers/clocksource/arm_arch_timer_mmio.c442
-rw-r--r--drivers/clocksource/arm_global_timer.c47
-rw-r--r--drivers/clocksource/asm9260_timer.c1
-rw-r--r--drivers/clocksource/clps711x-timer.c23
-rw-r--r--drivers/clocksource/dw_apb_timer.c39
-rw-r--r--drivers/clocksource/exynos_mct.c3
-rw-r--r--drivers/clocksource/hyperv_timer.c47
-rw-r--r--drivers/clocksource/i8253.c49
-rw-r--r--drivers/clocksource/ingenic-ost.c7
-rw-r--r--drivers/clocksource/ingenic-sysost.c27
-rw-r--r--drivers/clocksource/jcore-pit.c22
-rw-r--r--drivers/clocksource/mips-gic-timer.c65
-rw-r--r--drivers/clocksource/renesas-ostm.c4
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c4
-rw-r--r--drivers/clocksource/scx200_hrt.c1
-rw-r--r--drivers/clocksource/sh_cmt.c119
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c13
-rw-r--r--drivers/clocksource/timer-cadence-ttc.c6
-rw-r--r--drivers/clocksource/timer-clint.c2
-rw-r--r--drivers/clocksource/timer-cs5535.c1
-rw-r--r--drivers/clocksource/timer-econet-en751221.c216
-rw-r--r--drivers/clocksource/timer-gxp.c2
-rw-r--r--drivers/clocksource/timer-imx-tpm.c16
-rw-r--r--drivers/clocksource/timer-nxp-pit.c383
-rw-r--r--drivers/clocksource/timer-nxp-stm.c496
-rw-r--r--drivers/clocksource/timer-of.c17
-rw-r--r--drivers/clocksource/timer-of.h1
-rw-r--r--drivers/clocksource/timer-orion.c2
-rw-r--r--drivers/clocksource/timer-qcom.c8
-rw-r--r--drivers/clocksource/timer-ralink.c157
-rw-r--r--drivers/clocksource/timer-rda.c9
-rw-r--r--drivers/clocksource/timer-realtek.c150
-rw-r--r--drivers/clocksource/timer-riscv.c6
-rw-r--r--drivers/clocksource/timer-rtl-otto.c303
-rw-r--r--drivers/clocksource/timer-sp804.c24
-rw-r--r--drivers/clocksource/timer-sprd.c24
-rw-r--r--drivers/clocksource/timer-stm32-lp.c99
-rw-r--r--drivers/clocksource/timer-sun5i.c4
-rw-r--r--drivers/clocksource/timer-tegra.c1
-rw-r--r--drivers/clocksource/timer-tegra186.c120
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c8
-rw-r--r--drivers/clocksource/timer-ti-dm.c129
-rw-r--r--drivers/clocksource/timer-vf-pit.c194
-rw-r--r--drivers/comedi/Kconfig9
-rw-r--r--drivers/comedi/comedi_buf.c431
-rw-r--r--drivers/comedi/comedi_fops.c365
-rw-r--r--drivers/comedi/comedi_internal.h13
-rw-r--r--drivers/comedi/drivers.c177
-rw-r--r--drivers/comedi/drivers/8255.c20
-rw-r--r--drivers/comedi/drivers/Makefile1
-rw-r--r--drivers/comedi/drivers/adl_pci7250.c220
-rw-r--r--drivers/comedi/drivers/adl_pci9118.c4
-rw-r--r--drivers/comedi/drivers/aio_iiro_16.c3
-rw-r--r--drivers/comedi/drivers/c6xdigio.c46
-rw-r--r--drivers/comedi/drivers/cb_pcidas64.c5
-rw-r--r--drivers/comedi/drivers/comedi_bond.c4
-rw-r--r--drivers/comedi/drivers/comedi_test.c20
-rw-r--r--drivers/comedi/drivers/das16.c7
-rw-r--r--drivers/comedi/drivers/das16m1.c3
-rw-r--r--drivers/comedi/drivers/das6402.c3
-rw-r--r--drivers/comedi/drivers/jr3_pci.c5
-rw-r--r--drivers/comedi/drivers/multiq3.c9
-rw-r--r--drivers/comedi/drivers/ni_670x.c2
-rw-r--r--drivers/comedi/drivers/ni_atmio.c11
-rw-r--r--drivers/comedi/drivers/ni_mio_common.c9
-rw-r--r--drivers/comedi/drivers/ni_pcidio.c2
-rw-r--r--drivers/comedi/drivers/ni_pcimio.c9
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c5
-rw-r--r--drivers/comedi/drivers/ni_stc.h2
-rw-r--r--drivers/comedi/drivers/pcl726.c3
-rw-r--r--drivers/comedi/drivers/pcl812.c3
-rw-r--r--drivers/comedi/drivers/pcl818.c5
-rw-r--r--drivers/comedi/drivers/usbduxsigma.c2
-rw-r--r--drivers/comedi/kcomedilib/kcomedilib_main.c120
-rw-r--r--drivers/counter/104-quad-8.c4
-rw-r--r--drivers/counter/Kconfig2
-rw-r--r--drivers/counter/counter-chrdev.c3
-rw-r--r--drivers/counter/counter-core.c18
-rw-r--r--drivers/counter/ftm-quaddec.c4
-rw-r--r--drivers/counter/i8254.c6
-rw-r--r--drivers/counter/intel-qep.c12
-rw-r--r--drivers/counter/interrupt-cnt.c19
-rw-r--r--drivers/counter/microchip-tcb-capture.c206
-rw-r--r--drivers/counter/rz-mtu3-cnt.c2
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c26
-rw-r--r--drivers/counter/stm32-timer-cnt.c483
-rw-r--r--drivers/counter/ti-ecap-capture.c27
-rw-r--r--drivers/counter/ti-eqep.c175
-rw-r--r--drivers/cpufreq/Kconfig46
-rw-r--r--drivers/cpufreq/Kconfig.arm82
-rw-r--r--drivers/cpufreq/Kconfig.powerpc25
-rw-r--r--drivers/cpufreq/Kconfig.x8613
-rw-r--r--drivers/cpufreq/Makefile9
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c94
-rw-r--r--drivers/cpufreq/airoha-cpufreq.c153
-rw-r--r--drivers/cpufreq/amd-pstate-trace.h75
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c245
-rw-r--r--drivers/cpufreq/amd-pstate.c1559
-rw-r--r--drivers/cpufreq/amd-pstate.h127
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/apple-soc-cpufreq.c90
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c10
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c7
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c5
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c13
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c387
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c26
-rw-r--r--drivers/cpufreq/cpufreq-dt.c53
-rw-r--r--drivers/cpufreq/cpufreq-dt.h2
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c9
-rw-r--r--drivers/cpufreq/cpufreq.c754
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c24
-rw-r--r--drivers/cpufreq/cpufreq_governor.c45
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c28
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.h23
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c1
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c3
-rw-r--r--drivers/cpufreq/e_powersaver.c10
-rw-r--r--drivers/cpufreq/elanfreq.c2
-rw-r--r--drivers/cpufreq/freq_table.c43
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c5
-rw-r--r--drivers/cpufreq/intel_pstate.c1082
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c3
-rw-r--r--drivers/cpufreq/longhaul.c33
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c11
-rw-r--r--drivers/cpufreq/loongson3_cpufreq.c389
-rw-r--r--drivers/cpufreq/maple-cpufreq.c241
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c147
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c108
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c9
-rw-r--r--drivers/cpufreq/p4-clockmod.c1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c8
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c8
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c4
-rw-r--r--drivers/cpufreq/powernow-k6.c6
-rw-r--r--drivers/cpufreq/powernow-k7.c18
-rw-r--r--drivers/cpufreq/powernow-k8.c9
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c32
-rw-r--r--drivers/cpufreq/powernv-trace.h44
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c173
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h33
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c102
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c150
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c57
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c147
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c7
-rw-r--r--drivers/cpufreq/raspberrypi-cpufreq.c2
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs222
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c11
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c10
-rw-r--r--drivers/cpufreq/sc520_freq.c2
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c128
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c29
-rw-r--r--drivers/cpufreq/sh-cpufreq.c11
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c5
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c5
-rw-r--r--drivers/cpufreq/spear-cpufreq.c21
-rw-r--r--drivers/cpufreq/speedstep-centrino.c19
-rw-r--r--drivers/cpufreq/speedstep-ich.c1
-rw-r--r--drivers/cpufreq/speedstep-lib.c12
-rw-r--r--drivers/cpufreq/speedstep-lib.h10
-rw-r--r--drivers/cpufreq/speedstep-smi.c1
-rw-r--r--drivers/cpufreq/sti-cpufreq.c5
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c93
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c49
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c195
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c10
-rw-r--r--drivers/cpufreq/ti-cpufreq.c139
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c8
-rw-r--r--drivers/cpufreq/virtual-cpufreq.c332
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/cpuidle-arm.c8
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c13
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c1
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c2
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c34
-rw-r--r--drivers/cpuidle/cpuidle-psci.c146
-rw-r--r--drivers/cpuidle/cpuidle-psci.h4
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c1
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c15
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c66
-rw-r--r--drivers/cpuidle/cpuidle.c37
-rw-r--r--drivers/cpuidle/driver.c14
-rw-r--r--drivers/cpuidle/dt_idle_genpd.c14
-rw-r--r--drivers/cpuidle/dt_idle_states.c14
-rw-r--r--drivers/cpuidle/governor.c4
-rw-r--r--drivers/cpuidle/governors/menu.c327
-rw-r--r--drivers/cpuidle/governors/teo.c495
-rw-r--r--drivers/cpuidle/poll_state.c4
-rw-r--r--drivers/cpuidle/sysfs.c34
-rw-r--r--drivers/crypto/Kconfig152
-rw-r--r--drivers/crypto/Makefile10
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c2
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c162
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c82
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c298
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c7
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c3
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h46
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c51
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c110
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c103
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h18
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c10
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl.h2
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c6
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-crypto.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c800
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h28
-rw-r--r--drivers/crypto/atmel-aes.c24
-rw-r--r--drivers/crypto/atmel-ecc.c2
-rw-r--r--drivers/crypto/atmel-i2c.c2
-rw-r--r--drivers/crypto/atmel-sha.c23
-rw-r--r--drivers/crypto/atmel-sha204a.c13
-rw-r--r--drivers/crypto/atmel-tdes.c8
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c28
-rw-r--r--drivers/crypto/bcm/cipher.c34
-rw-r--r--drivers/crypto/bcm/spu.c7
-rw-r--r--drivers/crypto/bcm/spu2.c3
-rw-r--r--drivers/crypto/caam/Kconfig2
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/blob_gen.c85
-rw-r--r--drivers/crypto/caam/caamalg.c130
-rw-r--r--drivers/crypto/caam/caamalg_desc.c87
-rw-r--r--drivers/crypto/caam/caamalg_desc.h13
-rw-r--r--drivers/crypto/caam/caamalg_qi.c6
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c42
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h2
-rw-r--r--drivers/crypto/caam/caamhash.c1
-rw-r--r--drivers/crypto/caam/caampkc.c11
-rw-r--r--drivers/crypto/caam/caamrng.c4
-rw-r--r--drivers/crypto/caam/ctrl.c24
-rw-r--r--drivers/crypto/caam/debugfs.c2
-rw-r--r--drivers/crypto/caam/debugfs.h2
-rw-r--r--drivers/crypto/caam/desc.h9
-rw-r--r--drivers/crypto/caam/desc_constr.h8
-rw-r--r--drivers/crypto/caam/intern.h5
-rw-r--r--drivers/crypto/caam/jr.c5
-rw-r--r--drivers/crypto/caam/qi.c78
-rw-r--r--drivers/crypto/cavium/Makefile3
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c6
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.c2
-rw-r--r--drivers/crypto/cavium/zip/Makefile12
-rw-r--r--drivers/crypto/cavium/zip/common.h222
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c301
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.h79
-rw-r--r--drivers/crypto/cavium/zip/zip_deflate.c200
-rw-r--r--drivers/crypto/cavium/zip/zip_deflate.h62
-rw-r--r--drivers/crypto/cavium/zip/zip_device.c202
-rw-r--r--drivers/crypto/cavium/zip/zip_device.h108
-rw-r--r--drivers/crypto/cavium/zip/zip_inflate.c223
-rw-r--r--drivers/crypto/cavium/zip/zip_inflate.h62
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c651
-rw-r--r--drivers/crypto/cavium/zip/zip_main.h120
-rw-r--r--drivers/crypto/cavium/zip/zip_mem.c114
-rw-r--r--drivers/crypto/cavium/zip/zip_mem.h78
-rw-r--r--drivers/crypto/cavium/zip/zip_regs.h1347
-rw-r--r--drivers/crypto/ccp/Kconfig1
-rw-r--r--drivers/crypto/ccp/Makefile8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c15
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c13
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c3
-rw-r--r--drivers/crypto/ccp/ccp-dev.c2
-rw-r--r--drivers/crypto/ccp/ccp-ops.c174
-rw-r--r--drivers/crypto/ccp/dbc.c55
-rw-r--r--drivers/crypto/ccp/hsti.c138
-rw-r--r--drivers/crypto/ccp/hsti.h17
-rw-r--r--drivers/crypto/ccp/psp-dev.c43
-rw-r--r--drivers/crypto/ccp/psp-dev.h52
-rw-r--r--drivers/crypto/ccp/sev-dev-tio.c864
-rw-r--r--drivers/crypto/ccp/sev-dev-tio.h123
-rw-r--r--drivers/crypto/ccp/sev-dev-tsm.c405
-rw-r--r--drivers/crypto/ccp/sev-dev.c781
-rw-r--r--drivers/crypto/ccp/sev-dev.h20
-rw-r--r--drivers/crypto/ccp/sfs.c311
-rw-r--r--drivers/crypto/ccp/sfs.h47
-rw-r--r--drivers/crypto/ccp/sp-dev.c14
-rw-r--r--drivers/crypto/ccp/sp-dev.h5
-rw-r--r--drivers/crypto/ccp/sp-pci.c106
-rw-r--r--drivers/crypto/ccp/sp-platform.c19
-rw-r--r--drivers/crypto/ccree/cc_aead.c4
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c60
-rw-r--r--drivers/crypto/ccree/cc_cipher.c12
-rw-r--r--drivers/crypto/ccree/cc_driver.c2
-rw-r--r--drivers/crypto/ccree/cc_hash.c32
-rw-r--r--drivers/crypto/ccree/cc_pm.c1
-rw-r--r--drivers/crypto/chelsio/Kconfig6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c261
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/exynos-rng.c2
-rw-r--r--drivers/crypto/gemini/sl3516-ce-core.c2
-rw-r--r--drivers/crypto/gemini/sl3516-ce.h2
-rw-r--r--drivers/crypto/geode-aes.c2
-rw-r--r--drivers/crypto/hifn_795x.c24
-rw-r--r--drivers/crypto/hisilicon/Kconfig1
-rw-r--r--drivers/crypto/hisilicon/debugfs.c5
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h23
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c416
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c424
-rw-r--r--drivers/crypto/hisilicon/qm.c898
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c7
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h91
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c841
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h11
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c356
-rw-r--r--drivers/crypto/hisilicon/sgl.c30
-rw-r--r--drivers/crypto/hisilicon/trng/trng.c6
-rw-r--r--drivers/crypto/hisilicon/zip/Makefile2
-rw-r--r--drivers/crypto/hisilicon/zip/dae_main.c277
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h26
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c30
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c486
-rw-r--r--drivers/crypto/img-hash.c74
-rw-r--r--drivers/crypto/inside-secure/Makefile1
-rw-r--r--drivers/crypto/inside-secure/eip93/Kconfig20
-rw-r--r--drivers/crypto/inside-secure/eip93/Makefile5
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aead.c711
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aead.h38
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aes.h16
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-cipher.c413
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-cipher.h60
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-common.c822
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-common.h24
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-des.h16
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.c875
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.h82
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-main.c501
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-main.h151
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-regs.h335
-rw-r--r--drivers/crypto/inside-secure/safexcel.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel.h2
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c104
-rw-r--r--drivers/crypto/intel/iaa/Makefile2
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c257
-rw-r--r--drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c5
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-aes-core.c2
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-ecc.c2
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c16
-rw-r--r--drivers/crypto/intel/keembay/ocs-aes.c4
-rw-r--r--drivers/crypto/intel/keembay/ocs-hcu.c1
-rw-r--r--drivers/crypto/intel/qat/Kconfig19
-rw-r--r--drivers/crypto/intel/qat/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c47
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_drv.c29
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c41
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c33
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c950
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h168
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_drv.c226
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c6
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c49
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c6
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c6
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_drv.c51
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile74
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h87
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c15
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.c238
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.h49
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg.c35
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_common.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c205
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h38
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c69
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dbgfs.c13
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dc.c64
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dc.h17
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c12
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_fw_config.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c70
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c59
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c83
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c305
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h13
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c117
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c59
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c71
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h52
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c124
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c818
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h504
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c56
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h17
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c258
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h198
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c45
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c9
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c14
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c46
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h36
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c87
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c196
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c28
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.c19
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_timer.c71
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_timer.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c91
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_debug.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_vf_isr.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h27
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h99
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h318
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h33
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c203
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c165
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.h8
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c92
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_req.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c9
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_hal.c15
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c475
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c16
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c49
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c6
-rw-r--r--drivers/crypto/loongson/Kconfig5
-rw-r--r--drivers/crypto/loongson/Makefile1
-rw-r--r--drivers/crypto/loongson/loongson-rng.c209
-rw-r--r--drivers/crypto/marvell/Kconfig8
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c63
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h9
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c31
-rw-r--r--drivers/crypto/marvell/cesa/hash.c24
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c53
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c16
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c273
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c103
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h40
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c45
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h128
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c25
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h15
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c34
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c19
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c164
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c270
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c60
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c14
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c21
-rw-r--r--drivers/crypto/mxs-dcp.c27
-rw-r--r--drivers/crypto/n2_asm.S96
-rw-r--r--drivers/crypto/n2_core.c2171
-rw-r--r--drivers/crypto/n2_core.h232
-rw-r--r--drivers/crypto/nx/nx-842.c33
-rw-r--r--drivers/crypto/nx/nx-842.h18
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c8
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c16
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c8
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c8
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c128
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c33
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c109
-rw-r--r--drivers/crypto/nx/nx-sha256.c130
-rw-r--r--drivers/crypto/nx/nx-sha512.c143
-rw-r--r--drivers/crypto/nx/nx.c53
-rw-r--r--drivers/crypto/nx/nx.h14
-rw-r--r--drivers/crypto/omap-aes-gcm.c1
-rw-r--r--drivers/crypto/omap-aes.c66
-rw-r--r--drivers/crypto/omap-aes.h8
-rw-r--r--drivers/crypto/omap-des.c60
-rw-r--r--drivers/crypto/omap-sham.c32
-rw-r--r--drivers/crypto/padlock-sha.c478
-rw-r--r--drivers/crypto/qce/aead.c2
-rw-r--r--drivers/crypto/qce/core.c132
-rw-r--r--drivers/crypto/qce/core.h9
-rw-r--r--drivers/crypto/qce/dma.c28
-rw-r--r--drivers/crypto/qce/dma.h3
-rw-r--r--drivers/crypto/qce/sha.c2
-rw-r--r--drivers/crypto/qce/skcipher.c2
-rw-r--r--drivers/crypto/qcom-rng.c26
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c56
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_skcipher.c3
-rw-r--r--drivers/crypto/s5p-sss.c64
-rw-r--r--drivers/crypto/sa2ul.c68
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/starfive/jh7110-aes.c12
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.c7
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.h4
-rw-r--r--drivers/crypto/starfive/jh7110-hash.c25
-rw-r--r--drivers/crypto/starfive/jh7110-rsa.c17
-rw-r--r--drivers/crypto/stm32/Kconfig9
-rw-r--r--drivers/crypto/stm32/Makefile1
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c480
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c750
-rw-r--r--drivers/crypto/stm32/stm32-hash.c3
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/crypto/tegra/tegra-se-aes.c399
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c339
-rw-r--r--drivers/crypto/tegra/tegra-se-key.c29
-rw-r--r--drivers/crypto/tegra/tegra-se-main.c23
-rw-r--r--drivers/crypto/tegra/tegra-se.h39
-rw-r--r--drivers/crypto/ti/Kconfig15
-rw-r--r--drivers/crypto/ti/Makefile3
-rw-r--r--drivers/crypto/ti/dthev2-aes.c538
-rw-r--r--drivers/crypto/ti/dthev2-common.c217
-rw-r--r--drivers/crypto/ti/dthev2-common.h109
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c104
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c40
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c38
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c17
-rw-r--r--drivers/crypto/xilinx/Makefile1
-rw-r--r--drivers/crypto/xilinx/xilinx-trng.c430
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c3
-rw-r--r--drivers/crypto/xilinx/zynqmp-sha.c100
-rw-r--r--drivers/cxl/Kconfig90
-rw-r--r--drivers/cxl/Makefile20
-rw-r--r--drivers/cxl/acpi.c246
-rw-r--r--drivers/cxl/core/Makefile4
-rw-r--r--drivers/cxl/core/cdat.c653
-rw-r--r--drivers/cxl/core/core.h90
-rw-r--r--drivers/cxl/core/edac.c2109
-rw-r--r--drivers/cxl/core/features.c706
-rw-r--r--drivers/cxl/core/hdm.c688
-rw-r--r--drivers/cxl/core/mbox.c419
-rw-r--r--drivers/cxl/core/mce.c65
-rw-r--r--drivers/cxl/core/mce.h20
-rw-r--r--drivers/cxl/core/memdev.c267
-rw-r--r--drivers/cxl/core/pci.c500
-rw-r--r--drivers/cxl/core/pmem.c43
-rw-r--r--drivers/cxl/core/pmu.c2
-rw-r--r--drivers/cxl/core/port.c664
-rw-r--r--drivers/cxl/core/ras.c126
-rw-r--r--drivers/cxl/core/region.c1806
-rw-r--r--drivers/cxl/core/regs.c140
-rw-r--r--drivers/cxl/core/suspend.c4
-rw-r--r--drivers/cxl/core/trace.h528
-rw-r--r--drivers/cxl/cxl.h205
-rw-r--r--drivers/cxl/cxlmem.h214
-rw-r--r--drivers/cxl/cxlpci.h10
-rw-r--r--drivers/cxl/mem.c49
-rw-r--r--drivers/cxl/pci.c260
-rw-r--r--drivers/cxl/pmem.c112
-rw-r--r--drivers/cxl/port.c92
-rw-r--r--drivers/cxl/security.c25
-rw-r--r--drivers/dax/Kconfig2
-rw-r--r--drivers/dax/bus.c17
-rw-r--r--drivers/dax/cxl.c3
-rw-r--r--drivers/dax/dax-private.h26
-rw-r--r--drivers/dax/device.c79
-rw-r--r--drivers/dax/hmem/hmem.c2
-rw-r--r--drivers/dax/kmem.c12
-rw-r--r--drivers/dax/pmem.c2
-rw-r--r--drivers/dax/pmem/Makefile7
-rw-r--r--drivers/dax/pmem/pmem.c10
-rw-r--r--drivers/dax/super.c8
-rw-r--r--drivers/dca/dca-core.c1
-rw-r--r--drivers/dca/dca-sysfs.c20
-rw-r--r--drivers/devfreq/Kconfig11
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq-event.c8
-rw-r--r--drivers/devfreq/devfreq.c25
-rw-r--r--drivers/devfreq/event/exynos-nocp.c2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c113
-rw-r--r--drivers/devfreq/exynos-bus.c27
-rw-r--r--drivers/devfreq/governor.h127
-rw-r--r--drivers/devfreq/governor_passive.c27
-rw-r--r--drivers/devfreq/governor_performance.c3
-rw-r--r--drivers/devfreq/governor_powersave.c3
-rw-r--r--drivers/devfreq/governor_simpleondemand.c7
-rw-r--r--drivers/devfreq/governor_userspace.c9
-rw-r--r--drivers/devfreq/hisi_uncore_freq.c658
-rw-r--r--drivers/devfreq/imx-bus.c2
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c7
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/devfreq/sun8i-a33-mbus.c40
-rw-r--r--drivers/devfreq/tegra30-devfreq.c15
-rw-r--r--drivers/dibs/Kconfig23
-rw-r--r--drivers/dibs/Makefile8
-rw-r--r--drivers/dibs/dibs_loopback.c361
-rw-r--r--drivers/dibs/dibs_loopback.h57
-rw-r--r--drivers/dibs/dibs_main.c274
-rw-r--r--drivers/dio/dio-driver.c4
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf-mapping.c248
-rw-r--r--drivers/dma-buf/dma-buf.c344
-rw-r--r--drivers/dma-buf/dma-fence-array.c112
-rw-r--r--drivers/dma-buf/dma-fence-chain.c7
-rw-r--r--drivers/dma-buf/dma-fence-unwrap.c160
-rw-r--r--drivers/dma-buf/dma-fence.c215
-rw-r--r--drivers/dma-buf/dma-heap.c35
-rw-r--r--drivers/dma-buf/dma-resv.c24
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c65
-rw-r--r--drivers/dma-buf/heaps/system_heap.c85
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c268
-rw-r--r--drivers/dma-buf/st-dma-fence.c14
-rw-r--r--drivers/dma-buf/sw_sync.c47
-rw-r--r--drivers/dma-buf/sync_debug.c76
-rw-r--r--drivers/dma-buf/sync_debug.h2
-rw-r--r--drivers/dma-buf/sync_file.c24
-rw-r--r--drivers/dma-buf/sync_trace.h2
-rw-r--r--drivers/dma-buf/udmabuf.c339
-rw-r--r--drivers/dma/Kconfig110
-rw-r--r--drivers/dma/Makefile18
-rw-r--r--drivers/dma/acpi-dma.c47
-rw-r--r--drivers/dma/altera-msgdma.c22
-rw-r--r--drivers/dma/amba-pl08x.c6
-rw-r--r--drivers/dma/amd/Kconfig42
-rw-r--r--drivers/dma/amd/Makefile5
-rw-r--r--drivers/dma/amd/ae4dma/Makefile10
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma-dev.c157
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma-pci.c156
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma.h102
-rw-r--r--drivers/dma/amd/ptdma/Makefile (renamed from drivers/dma/ptdma/Makefile)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma-debugfs.c143
-rw-r--r--drivers/dma/amd/ptdma/ptdma-dev.c (renamed from drivers/dma/ptdma/ptdma-dev.c)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma-dmaengine.c659
-rw-r--r--drivers/dma/amd/ptdma/ptdma-pci.c (renamed from drivers/dma/ptdma/ptdma-pci.c)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma.h (renamed from drivers/dma/ptdma/ptdma.h)7
-rw-r--r--drivers/dma/amd/qdma/Makefile5
-rw-r--r--drivers/dma/amd/qdma/qdma-comm-regs.c64
-rw-r--r--drivers/dma/amd/qdma/qdma.c1143
-rw-r--r--drivers/dma/amd/qdma/qdma.h266
-rw-r--r--drivers/dma/apple-admac.c9
-rw-r--r--drivers/dma/arm-dma350.c660
-rw-r--r--drivers/dma/at_hdmac.c14
-rw-r--r--drivers/dma/at_xdmac.c10
-rw-r--r--drivers/dma/bcm-sba-raid.c6
-rw-r--r--drivers/dma/bcm2835-dma.c27
-rw-r--r--drivers/dma/bestcomm/bestcomm.c2
-rw-r--r--drivers/dma/cv1800b-dmamux.c259
-rw-r--r--drivers/dma/dma-axi-dmac.c120
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dmaengine.c62
-rw-r--r--drivers/dma/dmatest.c3
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c40
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h3
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c40
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c65
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-core.c26
-rw-r--r--drivers/dma/dw/acpi.c6
-rw-r--r--drivers/dma/dw/core.c131
-rw-r--r--drivers/dma/dw/dw.c40
-rw-r--r--drivers/dma/dw/idma32.c19
-rw-r--r--drivers/dma/dw/internal.h8
-rw-r--r--drivers/dma/dw/pci.c12
-rw-r--r--drivers/dma/dw/platform.c35
-rw-r--r--drivers/dma/dw/regs.h1
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c15
-rw-r--r--drivers/dma/ep93xx_dma.c293
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c14
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h11
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c119
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.h61
-rw-r--r--drivers/dma/fsl-edma-common.c146
-rw-r--r--drivers/dma/fsl-edma-common.h136
-rw-r--r--drivers/dma/fsl-edma-main.c398
-rw-r--r--drivers/dma/fsl-edma-trace.c4
-rw-r--r--drivers/dma/fsl-edma-trace.h132
-rw-r--r--drivers/dma/fsl-qdma.c6
-rw-r--r--drivers/dma/fsl_raid.c2
-rw-r--r--drivers/dma/fsldma.c22
-rw-r--r--drivers/dma/fsldma.h1
-rw-r--r--drivers/dma/hisi_dma.c2
-rw-r--r--drivers/dma/idma64.c6
-rw-r--r--drivers/dma/idxd/Makefile2
-rw-r--r--drivers/dma/idxd/bus.c6
-rw-r--r--drivers/dma/idxd/cdev.c46
-rw-r--r--drivers/dma/idxd/compat.c3
-rw-r--r--drivers/dma/idxd/defaults.c6
-rw-r--r--drivers/dma/idxd/device.c33
-rw-r--r--drivers/dma/idxd/dma.c2
-rw-r--r--drivers/dma/idxd/idxd.h24
-rw-r--r--drivers/dma/idxd/init.c694
-rw-r--r--drivers/dma/idxd/irq.c89
-rw-r--r--drivers/dma/idxd/perfmon.c102
-rw-r--r--drivers/dma/idxd/registers.h70
-rw-r--r--drivers/dma/idxd/submit.c8
-rw-r--r--drivers/dma/idxd/sysfs.c16
-rw-r--r--drivers/dma/img-mdc-dma.c4
-rw-r--r--drivers/dma/imx-dma.c15
-rw-r--r--drivers/dma/imx-sdma.c109
-rw-r--r--drivers/dma/ioat/dca.c8
-rw-r--r--drivers/dma/ioat/dma.c5
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/ioat/hw.h3
-rw-r--r--drivers/dma/ioat/init.c63
-rw-r--r--drivers/dma/k3dma.c3
-rw-r--r--drivers/dma/lgm/lgm-dma.c2
-rw-r--r--drivers/dma/loongson1-apb-dma.c660
-rw-r--r--drivers/dma/loongson2-apb-dma.c705
-rw-r--r--drivers/dma/lpc32xx-dmamux.c195
-rw-r--r--drivers/dma/ls2x-apb-dma.c705
-rw-r--r--drivers/dma/mcf-edma-main.c8
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c16
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c2
-rw-r--r--drivers/dma/milbeaut-hdmac.c2
-rw-r--r--drivers/dma/milbeaut-xdmac.c2
-rw-r--r--drivers/dma/mmp_pdma.c291
-rw-r--r--drivers/dma/mmp_tdma.c8
-rw-r--r--drivers/dma/moxart-dma.c7
-rw-r--r--drivers/dma/mpc512x_dma.c2
-rw-r--r--drivers/dma/mv_xor.c32
-rw-r--r--drivers/dma/mv_xor.h2
-rw-r--r--drivers/dma/mv_xor_v2.c6
-rw-r--r--drivers/dma/nbpfaxi.c34
-rw-r--r--drivers/dma/of-dma.c4
-rw-r--r--drivers/dma/owl-dma.c4
-rw-r--r--drivers/dma/pch_dma.c5
-rw-r--r--drivers/dma/pl330.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c8
-rw-r--r--drivers/dma/ppc4xx/dma.h2
-rw-r--r--drivers/dma/ptdma/Kconfig13
-rw-r--r--drivers/dma/ptdma/ptdma-debugfs.c106
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c411
-rw-r--r--drivers/dma/pxa_dma.c6
-rw-r--r--drivers/dma/qcom/bam_dma.c20
-rw-r--r--drivers/dma/qcom/gpi.c66
-rw-r--r--drivers/dma/qcom/hidma.c14
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c110
-rw-r--r--drivers/dma/qcom/qcom_adm.c4
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c4
-rw-r--r--drivers/dma/sh/Kconfig10
-rw-r--r--drivers/dma/sh/rcar-dmac.c26
-rw-r--r--drivers/dma/sh/rz-dmac.c115
-rw-r--r--drivers/dma/sh/shdma-base.c29
-rw-r--r--drivers/dma/sh/shdmac.c21
-rw-r--r--drivers/dma/sh/usb-dmac.c15
-rw-r--r--drivers/dma/sprd-dma.c3
-rw-r--r--drivers/dma/st_fdma.c3
-rw-r--r--drivers/dma/ste_dma40.c6
-rw-r--r--drivers/dma/ste_dma40.h2
-rw-r--r--drivers/dma/ste_dma40_ll.h2
-rw-r--r--drivers/dma/stm32/Kconfig47
-rw-r--r--drivers/dma/stm32/Makefile5
-rw-r--r--drivers/dma/stm32/stm32-dma.c (renamed from drivers/dma/stm32-dma.c)14
-rw-r--r--drivers/dma/stm32/stm32-dma3.c1926
-rw-r--r--drivers/dma/stm32/stm32-dmamux.c (renamed from drivers/dma/stm32-dmamux.c)0
-rw-r--r--drivers/dma/stm32/stm32-mdma.c (renamed from drivers/dma/stm32-mdma.c)10
-rw-r--r--drivers/dma/sun4i-dma.c254
-rw-r--r--drivers/dma/sun6i-dma.c5
-rw-r--r--drivers/dma/tegra186-gpc-dma.c12
-rw-r--r--drivers/dma/tegra20-apb-dma.c4
-rw-r--r--drivers/dma/tegra210-adma.c288
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dma/ti/cppi41.c3
-rw-r--r--drivers/dma/ti/edma.c24
-rw-r--r--drivers/dma/ti/k3-psil.c1
-rw-r--r--drivers/dma/ti/k3-udma-glue.c24
-rw-r--r--drivers/dma/ti/k3-udma.c133
-rw-r--r--drivers/dma/ti/k3-udma.h1
-rw-r--r--drivers/dma/ti/omap-dma.c9
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/txx9dmac.c4
-rw-r--r--drivers/dma/uniphier-mdmac.c2
-rw-r--r--drivers/dma/uniphier-xdmac.c2
-rw-r--r--drivers/dma/virt-dma.c1
-rw-r--r--drivers/dma/virt-dma.h10
-rw-r--r--drivers/dma/xgene-dma.c4
-rw-r--r--drivers/dma/xilinx/xdma.c15
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c123
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c113
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c36
-rw-r--r--drivers/dpll/Kconfig6
-rw-r--r--drivers/dpll/Makefile2
-rw-r--r--drivers/dpll/dpll_core.c52
-rw-r--r--drivers/dpll/dpll_core.h3
-rw-r--r--drivers/dpll/dpll_netlink.c513
-rw-r--r--drivers/dpll/dpll_netlink.h2
-rw-r--r--drivers/dpll/dpll_nl.c18
-rw-r--r--drivers/dpll/dpll_nl.h2
-rw-r--r--drivers/dpll/zl3073x/Kconfig39
-rw-r--r--drivers/dpll/zl3073x/Makefile11
-rw-r--r--drivers/dpll/zl3073x/core.c1080
-rw-r--r--drivers/dpll/zl3073x/core.h367
-rw-r--r--drivers/dpll/zl3073x/devlink.c390
-rw-r--r--drivers/dpll/zl3073x/devlink.h15
-rw-r--r--drivers/dpll/zl3073x/dpll.c1938
-rw-r--r--drivers/dpll/zl3073x/dpll.h48
-rw-r--r--drivers/dpll/zl3073x/flash.c666
-rw-r--r--drivers/dpll/zl3073x/flash.h29
-rw-r--r--drivers/dpll/zl3073x/fw.c419
-rw-r--r--drivers/dpll/zl3073x/fw.h52
-rw-r--r--drivers/dpll/zl3073x/i2c.c76
-rw-r--r--drivers/dpll/zl3073x/out.c157
-rw-r--r--drivers/dpll/zl3073x/out.h93
-rw-r--r--drivers/dpll/zl3073x/prop.c369
-rw-r--r--drivers/dpll/zl3073x/prop.h34
-rw-r--r--drivers/dpll/zl3073x/ref.c204
-rw-r--r--drivers/dpll/zl3073x/ref.h134
-rw-r--r--drivers/dpll/zl3073x/regs.h317
-rw-r--r--drivers/dpll/zl3073x/spi.c76
-rw-r--r--drivers/dpll/zl3073x/synth.c87
-rw-r--r--drivers/dpll/zl3073x/synth.h72
-rw-r--r--drivers/edac/Kconfig110
-rw-r--r--drivers/edac/Makefile22
-rw-r--r--drivers/edac/a72_edac.c225
-rw-r--r--drivers/edac/altera_edac.c46
-rw-r--r--drivers/edac/altera_edac.h2
-rw-r--r--drivers/edac/amd64_edac.c285
-rw-r--r--drivers/edac/amd64_edac.h11
-rw-r--r--drivers/edac/amd8111_edac.c596
-rw-r--r--drivers/edac/amd8111_edac.h118
-rw-r--r--drivers/edac/amd8131_edac.c358
-rw-r--r--drivers/edac/amd8131_edac.h107
-rw-r--r--drivers/edac/armada_xp_edac.c4
-rw-r--r--drivers/edac/aspeed_edac.c2
-rw-r--r--drivers/edac/bluefield_edac.c182
-rw-r--r--drivers/edac/cell_edac.c281
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/debugfs.c5
-rw-r--r--drivers/edac/dmc520_edac.c6
-rw-r--r--drivers/edac/ecs.c207
-rw-r--r--drivers/edac/edac_device.c185
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c382
-rw-r--r--drivers/edac/fsl_ddr_edac.c141
-rw-r--r--drivers/edac/fsl_ddr_edac.h13
-rw-r--r--drivers/edac/ghes_edac.c9
-rw-r--r--drivers/edac/highbank_l2_edac.c2
-rw-r--r--drivers/edac/highbank_mc_edac.c2
-rw-r--r--drivers/edac/i10nm_base.c618
-rw-r--r--drivers/edac/i5000_edac.c8
-rw-r--r--drivers/edac/i5400_edac.c3
-rw-r--r--drivers/edac/i7300_edac.c7
-rw-r--r--drivers/edac/ie31200_edac.c688
-rw-r--r--drivers/edac/igen6_edac.c191
-rw-r--r--drivers/edac/imh_base.c602
-rw-r--r--drivers/edac/layerscape_edac.c4
-rw-r--r--drivers/edac/loongson_edac.c157
-rw-r--r--drivers/edac/mce_amd.c23
-rw-r--r--drivers/edac/mem_repair.c357
-rw-r--r--drivers/edac/mpc85xx_edac.c7
-rw-r--r--drivers/edac/npcm_edac.c2
-rw-r--r--drivers/edac/octeon_edac-l2c.c3
-rw-r--r--drivers/edac/octeon_edac-lmc.c3
-rw-r--r--drivers/edac/octeon_edac-pc.c3
-rw-r--r--drivers/edac/octeon_edac-pci.c3
-rw-r--r--drivers/edac/pnd2_edac.c8
-rw-r--r--drivers/edac/ppc4xx_edac.c1425
-rw-r--r--drivers/edac/ppc4xx_edac.h167
-rw-r--r--drivers/edac/qcom_edac.c14
-rw-r--r--drivers/edac/sb_edac.c53
-rw-r--r--drivers/edac/scrub.c210
-rw-r--r--drivers/edac/skx_base.c98
-rw-r--r--drivers/edac/skx_common.c273
-rw-r--r--drivers/edac/skx_common.h202
-rw-r--r--drivers/edac/synopsys_edac.c134
-rw-r--r--drivers/edac/thunderx_edac.c6
-rw-r--r--drivers/edac/ti_edac.c2
-rw-r--r--drivers/edac/versal_edac.c2
-rw-r--r--drivers/edac/versalnet_edac.c962
-rw-r--r--drivers/edac/xgene_edac.c19
-rw-r--r--drivers/edac/zynqmp_edac.c2
-rw-r--r--drivers/eisa/Makefile10
-rw-r--r--drivers/eisa/eisa-bus.c8
-rw-r--r--drivers/extcon/Kconfig27
-rw-r--r--drivers/extcon/Makefile2
-rw-r--r--drivers/extcon/extcon-adc-jack.c8
-rw-r--r--drivers/extcon/extcon-axp288.c4
-rw-r--r--drivers/extcon/extcon-fsa9480.c4
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c19
-rw-r--r--drivers/extcon/extcon-intel-mrfld.c24
-rw-r--r--drivers/extcon/extcon-lc824206xa.c495
-rw-r--r--drivers/extcon/extcon-max14526.c302
-rw-r--r--drivers/extcon/extcon-max3355.c4
-rw-r--r--drivers/extcon/extcon-max77843.c4
-rw-r--r--drivers/extcon/extcon-ptn5150.c2
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/extcon/extcon-rtk-type-c.c5
-rw-r--r--drivers/extcon/extcon-usb-gpio.c4
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c4
-rw-r--r--drivers/firewire/.kunitconfig2
-rw-r--r--drivers/firewire/Kconfig35
-rw-r--r--drivers/firewire/Makefile2
-rw-r--r--drivers/firewire/core-card.c577
-rw-r--r--drivers/firewire/core-cdev.c476
-rw-r--r--drivers/firewire/core-device.c438
-rw-r--r--drivers/firewire/core-iso.c81
-rw-r--r--drivers/firewire/core-topology.c320
-rw-r--r--drivers/firewire/core-trace.c11
-rw-r--r--drivers/firewire/core-transaction.c439
-rw-r--r--drivers/firewire/core.h57
-rw-r--r--drivers/firewire/device-attribute-test.c2
-rw-r--r--drivers/firewire/init_ohci1394_dma.c10
-rw-r--r--drivers/firewire/net.c6
-rw-r--r--drivers/firewire/ohci-serdes-test.c122
-rw-r--r--drivers/firewire/ohci.c1210
-rw-r--r--drivers/firewire/ohci.h243
-rw-r--r--drivers/firewire/packet-header-definitions.h2
-rw-r--r--drivers/firewire/packet-serdes-test.c335
-rw-r--r--drivers/firewire/phy-packet-definitions.h302
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/firewire/self-id-sequence-helper-test.c152
-rw-r--r--drivers/firewire/uapi-test.c1
-rw-r--r--drivers/firmware/Kconfig32
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/arm_ffa/Makefile6
-rw-r--r--drivers/firmware/arm_ffa/bus.c47
-rw-r--r--drivers/firmware/arm_ffa/common.h2
-rw-r--r--drivers/firmware/arm_ffa/driver.c908
-rw-r--r--drivers/firmware/arm_scmi/Kconfig125
-rw-r--r--drivers/firmware/arm_scmi/Makefile15
-rw-r--r--drivers/firmware/arm_scmi/base.c6
-rw-r--r--drivers/firmware/arm_scmi/bus.c201
-rw-r--r--drivers/firmware/arm_scmi/clock.c34
-rw-r--r--drivers/firmware/arm_scmi/common.h275
-rw-r--r--drivers/firmware/arm_scmi/driver.c535
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c332
-rw-r--r--drivers/firmware/arm_scmi/msg.c32
-rw-r--r--drivers/firmware/arm_scmi/notify.c39
-rw-r--r--drivers/firmware/arm_scmi/perf.c48
-rw-r--r--drivers/firmware/arm_scmi/pinctrl.c1
-rw-r--r--drivers/firmware/arm_scmi/power.c2
-rw-r--r--drivers/firmware/arm_scmi/protocols.h4
-rw-r--r--drivers/firmware/arm_scmi/quirks.c327
-rw-r--r--drivers/firmware/arm_scmi/quirks.h52
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c95
-rw-r--r--drivers/firmware/arm_scmi/reset.c2
-rw-r--r--drivers/firmware/arm_scmi/scmi_power_control.c33
-rw-r--r--drivers/firmware/arm_scmi/sensors.c2
-rw-r--r--drivers/firmware/arm_scmi/shmem.c165
-rw-r--r--drivers/firmware/arm_scmi/system.c2
-rw-r--r--drivers/firmware/arm_scmi/transports/Kconfig123
-rw-r--r--drivers/firmware/arm_scmi/transports/Makefile18
-rw-r--r--drivers/firmware/arm_scmi/transports/mailbox.c388
-rw-r--r--drivers/firmware/arm_scmi/transports/optee.c (renamed from drivers/firmware/arm_scmi/optee.c)144
-rw-r--r--drivers/firmware/arm_scmi/transports/smc.c (renamed from drivers/firmware/arm_scmi/smc.c)70
-rw-r--r--drivers/firmware/arm_scmi/transports/virtio.c (renamed from drivers/firmware/arm_scmi/virtio.c)134
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Kconfig50
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Makefile5
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c384
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c276
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c263
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c430
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx95.rst1739
-rw-r--r--drivers/firmware/arm_scmi/voltage.c8
-rw-r--r--drivers/firmware/arm_scpi.c5
-rw-r--r--drivers/firmware/arm_sdei.c13
-rw-r--r--drivers/firmware/broadcom/bcm47xx_sprom.c2
-rw-r--r--drivers/firmware/cirrus/Kconfig15
-rw-r--r--drivers/firmware/cirrus/Makefile2
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c864
-rw-r--r--drivers/firmware/cirrus/test/Makefile23
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_bin.c203
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c725
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c367
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_utils.c13
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c478
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin.c2556
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c595
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c689
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c3281
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c1838
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c2669
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c2211
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c1347
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_tests.c14
-rw-r--r--drivers/firmware/dmi-sysfs.c28
-rw-r--r--drivers/firmware/dmi_scan.c14
-rw-r--r--drivers/firmware/efi/Kconfig47
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/arm-runtime.c4
-rw-r--r--drivers/firmware/efi/capsule-loader.c1
-rw-r--r--drivers/firmware/efi/cper-arm.c54
-rw-r--r--drivers/firmware/efi/cper-x86.c2
-rw-r--r--drivers/firmware/efi/cper.c79
-rw-r--r--drivers/firmware/efi/cper_cxl.c39
-rw-r--r--drivers/firmware/efi/cper_cxl.h66
-rw-r--r--drivers/firmware/efi/dev-path-parser.c4
-rw-r--r--drivers/firmware/efi/efi-init.c29
-rw-r--r--drivers/firmware/efi/efi-pstore.c10
-rw-r--r--drivers/firmware/efi/efi.c64
-rw-r--r--drivers/firmware/efi/efibc.c2
-rw-r--r--drivers/firmware/efi/embedded-firmware.c4
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/fdtparams.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile31
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot24
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c13
-rw-r--r--drivers/firmware/efi/libstub/arm64.c3
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c22
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c59
-rw-r--r--drivers/firmware/efi/libstub/efistub.h65
-rw-r--r--drivers/firmware/efi/libstub/file.c22
-rw-r--r--drivers/firmware/efi/libstub/gop.c450
-rw-r--r--drivers/firmware/efi/libstub/intrinsics.c26
-rw-r--r--drivers/firmware/efi/libstub/kaslr.c24
-rw-r--r--drivers/firmware/efi/libstub/loongarch.c4
-rw-r--r--drivers/firmware/efi/libstub/mem.c20
-rw-r--r--drivers/firmware/efi/libstub/pci.c34
-rw-r--r--drivers/firmware/efi/libstub/printk.c4
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c11
-rw-r--r--drivers/firmware/efi/libstub/relocate.c15
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/riscv.c2
-rw-r--r--drivers/firmware/efi/libstub/screen_info.c2
-rw-r--r--drivers/firmware/efi/libstub/smbios.c43
-rw-r--r--drivers/firmware/efi/libstub/tpm.c11
-rw-r--r--drivers/firmware/efi/libstub/unaccepted_memory.c5
-rw-r--r--drivers/firmware/efi/libstub/x86-5lvl.c6
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c452
-rw-r--r--drivers/firmware/efi/libstub/zboot-decompress-gzip.c68
-rw-r--r--drivers/firmware/efi/libstub/zboot-decompress-zstd.c49
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S32
-rw-r--r--drivers/firmware/efi/libstub/zboot.c67
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds15
-rw-r--r--drivers/firmware/efi/memattr.c27
-rw-r--r--drivers/firmware/efi/memmap.c12
-rw-r--r--drivers/firmware/efi/mokvar-table.c59
-rw-r--r--drivers/firmware/efi/ovmf-debug-log.c111
-rw-r--r--drivers/firmware/efi/rci2-table.c10
-rw-r--r--drivers/firmware/efi/riscv-runtime.c27
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c30
-rw-r--r--drivers/firmware/efi/stmm/mm_communication.h6
-rw-r--r--drivers/firmware/efi/stmm/tee_stmm_efi.c61
-rw-r--r--drivers/firmware/efi/sysfb_efi.c2
-rw-r--r--drivers/firmware/efi/test/efi_test.c5
-rw-r--r--drivers/firmware/efi/tpm.c26
-rw-r--r--drivers/firmware/efi/unaccepted_memory.c18
-rw-r--r--drivers/firmware/efi/vars.c16
-rw-r--r--drivers/firmware/google/cbmem.c9
-rw-r--r--drivers/firmware/google/coreboot_table.c9
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c15
-rw-r--r--drivers/firmware/google/gsmi.c11
-rw-r--r--drivers/firmware/google/memconsole-coreboot.c1
-rw-r--r--drivers/firmware/google/memconsole-x86-legacy.c1
-rw-r--r--drivers/firmware/google/memconsole.c3
-rw-r--r--drivers/firmware/google/vpd.c5
-rw-r--r--drivers/firmware/imx/Kconfig33
-rw-r--r--drivers/firmware/imx/Makefile3
-rw-r--r--drivers/firmware/imx/imx-dsp.c2
-rw-r--r--drivers/firmware/imx/imx-scu-irq.c32
-rw-r--r--drivers/firmware/imx/imx-scu.c12
-rw-r--r--drivers/firmware/imx/sm-cpu.c85
-rw-r--r--drivers/firmware/imx/sm-lmm.c91
-rw-r--r--drivers/firmware/imx/sm-misc.c119
-rw-r--r--drivers/firmware/iscsi_ibft.c5
-rw-r--r--drivers/firmware/memmap.c2
-rw-r--r--drivers/firmware/meson/Kconfig2
-rw-r--r--drivers/firmware/meson/meson_sm.c8
-rw-r--r--drivers/firmware/microchip/mpfs-auto-update.c182
-rw-r--r--drivers/firmware/mtk-adsp-ipc.c9
-rw-r--r--drivers/firmware/psci/psci.c53
-rw-r--r--drivers/firmware/psci/psci_checker.c4
-rw-r--r--drivers/firmware/qcom/Kconfig37
-rw-r--r--drivers/firmware/qcom/Makefile1
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c260
-rw-r--r--drivers/firmware/qcom/qcom_scm-smc.c34
-rw-r--r--drivers/firmware/qcom/qcom_scm.c705
-rw-r--r--drivers/firmware/qcom/qcom_scm.h25
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.c524
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.h13
-rw-r--r--drivers/firmware/qemu_fw_cfg.c8
-rw-r--r--drivers/firmware/raspberrypi.c5
-rw-r--r--drivers/firmware/samsung/Kconfig14
-rw-r--r--drivers/firmware/samsung/Makefile6
-rw-r--r--drivers/firmware/samsung/exynos-acpm-dvfs.c80
-rw-r--r--drivers/firmware/samsung/exynos-acpm-dvfs.h21
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.c239
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.h29
-rw-r--r--drivers/firmware/samsung/exynos-acpm.c792
-rw-r--r--drivers/firmware/samsung/exynos-acpm.h23
-rw-r--r--drivers/firmware/smccc/kvm_guest.c78
-rw-r--r--drivers/firmware/smccc/smccc.c18
-rw-r--r--drivers/firmware/smccc/soc_id.c80
-rw-r--r--drivers/firmware/stratix10-rsu.c281
-rw-r--r--drivers/firmware/stratix10-svc.c791
-rw-r--r--drivers/firmware/sysfb.c78
-rw-r--r--drivers/firmware/sysfb_simplefb.c31
-rw-r--r--drivers/firmware/tegra/Kconfig5
-rw-r--r--drivers/firmware/tegra/Makefile1
-rw-r--r--drivers/firmware/tegra/bpmp-private.h6
-rw-r--r--drivers/firmware/tegra/bpmp-tegra186.c14
-rw-r--r--drivers/firmware/tegra/bpmp.c12
-rw-r--r--drivers/firmware/thead,th1520-aon.c250
-rw-r--r--drivers/firmware/ti_sci.c657
-rw-r--r--drivers/firmware/ti_sci.h155
-rw-r--r--drivers/firmware/turris-mox-rwtm.c573
-rw-r--r--drivers/firmware/xilinx/Makefile2
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c175
-rw-r--r--drivers/firmware/xilinx/zynqmp-ufs.c118
-rw-r--r--drivers/firmware/xilinx/zynqmp.c338
-rw-r--r--drivers/fpga/Kconfig12
-rw-r--r--drivers/fpga/Makefile2
-rw-r--r--drivers/fpga/altera-cvp.c23
-rw-r--r--drivers/fpga/altera-fpga2sdram.c8
-rw-r--r--drivers/fpga/altera-freeze-bridge.c2
-rw-r--r--drivers/fpga/altera-hps2fpga.c2
-rw-r--r--drivers/fpga/altera-ps-spi.c1
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c117
-rw-r--r--drivers/fpga/dfl-afu-error.c59
-rw-r--r--drivers/fpga/dfl-afu-main.c288
-rw-r--r--drivers/fpga/dfl-afu-region.c51
-rw-r--r--drivers/fpga/dfl-afu.h29
-rw-r--r--drivers/fpga/dfl-fme-br.c32
-rw-r--r--drivers/fpga/dfl-fme-error.c98
-rw-r--r--drivers/fpga/dfl-fme-main.c105
-rw-r--r--drivers/fpga/dfl-fme-pr.c86
-rw-r--r--drivers/fpga/dfl-fme-region.c8
-rw-r--r--drivers/fpga/dfl-fme.h2
-rw-r--r--drivers/fpga/dfl-pci.c16
-rw-r--r--drivers/fpga/dfl.c449
-rw-r--r--drivers/fpga/dfl.h139
-rw-r--r--drivers/fpga/fpga-bridge.c57
-rw-r--r--drivers/fpga/fpga-mgr.c82
-rw-r--r--drivers/fpga/fpga-region.c24
-rw-r--r--drivers/fpga/ice40-spi.c4
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c7
-rw-r--r--drivers/fpga/microchip-spi.c2
-rw-r--r--drivers/fpga/of-fpga-region.c2
-rw-r--r--drivers/fpga/socfpga-a10.c2
-rw-r--r--drivers/fpga/socfpga.c7
-rw-r--r--drivers/fpga/stratix10-soc.c2
-rw-r--r--drivers/fpga/tests/Kconfig4
-rw-r--r--drivers/fpga/tests/fpga-bridge-test.c51
-rw-r--r--drivers/fpga/tests/fpga-mgr-test.c44
-rw-r--r--drivers/fpga/tests/fpga-region-test.c77
-rw-r--r--drivers/fpga/versal-fpga.c2
-rw-r--r--drivers/fpga/xilinx-core.c229
-rw-r--r--drivers/fpga/xilinx-core.h27
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c2
-rw-r--r--drivers/fpga/xilinx-selectmap.c95
-rw-r--r--drivers/fpga/xilinx-spi.c231
-rw-r--r--drivers/fpga/zynq-fpga.c20
-rw-r--r--drivers/fsi/fsi-core.c11
-rw-r--r--drivers/fsi/fsi-master-aspeed.c5
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c18
-rw-r--r--drivers/fsi/fsi-master-gpio.c5
-rw-r--r--drivers/fsi/fsi-master-hub.c1
-rw-r--r--drivers/fsi/fsi-occ.c39
-rw-r--r--drivers/fsi/fsi-scom.c1
-rw-r--r--drivers/fwctl/Kconfig33
-rw-r--r--drivers/fwctl/Makefile6
-rw-r--r--drivers/fwctl/main.c421
-rw-r--r--drivers/fwctl/mlx5/Makefile4
-rw-r--r--drivers/fwctl/mlx5/main.c418
-rw-r--r--drivers/fwctl/pds/Makefile4
-rw-r--r--drivers/fwctl/pds/main.c535
-rw-r--r--drivers/gnss/core.c1
-rw-r--r--drivers/gnss/ubx.c8
-rw-r--r--drivers/gpib/Kconfig255
-rw-r--r--drivers/gpib/Makefile20
-rw-r--r--drivers/gpib/TODO10
-rw-r--r--drivers/gpib/agilent_82350b/Makefile2
-rw-r--r--drivers/gpib/agilent_82350b/agilent_82350b.c896
-rw-r--r--drivers/gpib/agilent_82350b/agilent_82350b.h157
-rw-r--r--drivers/gpib/agilent_82357a/Makefile4
-rw-r--r--drivers/gpib/agilent_82357a/agilent_82357a.c1691
-rw-r--r--drivers/gpib/agilent_82357a/agilent_82357a.h182
-rw-r--r--drivers/gpib/cb7210/Makefile3
-rw-r--r--drivers/gpib/cb7210/cb7210.c1586
-rw-r--r--drivers/gpib/cb7210/cb7210.h203
-rw-r--r--drivers/gpib/cec/Makefile3
-rw-r--r--drivers/gpib/cec/cec.h20
-rw-r--r--drivers/gpib/cec/cec_gpib.c393
-rw-r--r--drivers/gpib/common/Makefile6
-rw-r--r--drivers/gpib/common/gpib_os.c2271
-rw-r--r--drivers/gpib/common/iblib.c717
-rw-r--r--drivers/gpib/common/ibsys.h34
-rw-r--r--drivers/gpib/eastwood/Makefile3
-rw-r--r--drivers/gpib/eastwood/fluke_gpib.c1180
-rw-r--r--drivers/gpib/eastwood/fluke_gpib.h146
-rw-r--r--drivers/gpib/fmh_gpib/Makefile2
-rw-r--r--drivers/gpib/fmh_gpib/fmh_gpib.c1754
-rw-r--r--drivers/gpib/fmh_gpib/fmh_gpib.h177
-rw-r--r--drivers/gpib/gpio/Makefile4
-rw-r--r--drivers/gpib/gpio/gpib_bitbang.c1469
-rw-r--r--drivers/gpib/hp_82335/Makefile4
-rw-r--r--drivers/gpib/hp_82335/hp82335.c371
-rw-r--r--drivers/gpib/hp_82335/hp82335.h52
-rw-r--r--drivers/gpib/hp_82341/Makefile2
-rw-r--r--drivers/gpib/hp_82341/hp_82341.c907
-rw-r--r--drivers/gpib/hp_82341/hp_82341.h165
-rw-r--r--drivers/gpib/include/amcc5920.h49
-rw-r--r--drivers/gpib/include/amccs5933.h59
-rw-r--r--drivers/gpib/include/gpibP.h41
-rw-r--r--drivers/gpib/include/gpib_cmd.h112
-rw-r--r--drivers/gpib/include/gpib_pci_ids.h23
-rw-r--r--drivers/gpib/include/gpib_proto.h49
-rw-r--r--drivers/gpib/include/gpib_state_machines.h23
-rw-r--r--drivers/gpib/include/gpib_types.h381
-rw-r--r--drivers/gpib/include/nec7210.h141
-rw-r--r--drivers/gpib/include/nec7210_registers.h218
-rw-r--r--drivers/gpib/include/plx9050.h72
-rw-r--r--drivers/gpib/include/quancom_pci.h22
-rw-r--r--drivers/gpib/include/tms9914.h280
-rw-r--r--drivers/gpib/include/tnt4882_registers.h192
-rw-r--r--drivers/gpib/ines/Makefile3
-rw-r--r--drivers/gpib/ines/ines.h165
-rw-r--r--drivers/gpib/ines/ines_gpib.c1500
-rw-r--r--drivers/gpib/lpvo_usb_gpib/Makefile3
-rw-r--r--drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c2025
-rw-r--r--drivers/gpib/nec7210/Makefile4
-rw-r--r--drivers/gpib/nec7210/board.h19
-rw-r--r--drivers/gpib/nec7210/nec7210.c1121
-rw-r--r--drivers/gpib/ni_usb/Makefile4
-rw-r--r--drivers/gpib/ni_usb/ni_usb_gpib.c2678
-rw-r--r--drivers/gpib/ni_usb/ni_usb_gpib.h226
-rw-r--r--drivers/gpib/pc2/Makefile5
-rw-r--r--drivers/gpib/pc2/pc2_gpib.c684
-rw-r--r--drivers/gpib/tms9914/Makefile6
-rw-r--r--drivers/gpib/tms9914/tms9914.c914
-rw-r--r--drivers/gpib/tnt4882/Makefile6
-rw-r--r--drivers/gpib/tnt4882/mite.c133
-rw-r--r--drivers/gpib/tnt4882/mite.h234
-rw-r--r--drivers/gpib/tnt4882/tnt4882_gpib.c1838
-rw-r--r--drivers/gpio/Kconfig284
-rw-r--r--drivers/gpio/Makefile22
-rw-r--r--drivers/gpio/TODO127
-rw-r--r--drivers/gpio/dev-sync-probe.c97
-rw-r--r--drivers/gpio/dev-sync-probe.h25
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c4
-rw-r--r--drivers/gpio/gpio-104-idio-16.c3
-rw-r--r--drivers/gpio/gpio-74x164.c103
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c32
-rw-r--r--drivers/gpio/gpio-adnp.c136
-rw-r--r--drivers/gpio/gpio-adp5520.c10
-rw-r--r--drivers/gpio/gpio-adp5585.c527
-rw-r--r--drivers/gpio/gpio-aggregator.c1521
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c12
-rw-r--r--drivers/gpio/gpio-altera.c194
-rw-r--r--drivers/gpio/gpio-amd-fch.c5
-rw-r--r--drivers/gpio/gpio-amd8111.c8
-rw-r--r--drivers/gpio/gpio-amdpt.c52
-rw-r--r--drivers/gpio/gpio-arizona.c9
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c86
-rw-r--r--drivers/gpio/gpio-aspeed.c722
-rw-r--r--drivers/gpio/gpio-ath79.c121
-rw-r--r--drivers/gpio/gpio-bcm-kona.c127
-rw-r--r--drivers/gpio/gpio-bd71815.c13
-rw-r--r--drivers/gpio/gpio-bd71828.c13
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c6
-rw-r--r--drivers/gpio/gpio-blzp1600.c290
-rw-r--r--drivers/gpio/gpio-brcmstb.c146
-rw-r--r--drivers/gpio/gpio-bt8xx.c76
-rw-r--r--drivers/gpio/gpio-cadence.c86
-rw-r--r--drivers/gpio/gpio-cgbc.c200
-rw-r--r--drivers/gpio/gpio-clps711x.c28
-rw-r--r--drivers/gpio/gpio-creg-snps.c8
-rw-r--r--drivers/gpio/gpio-cros-ec.c11
-rw-r--r--drivers/gpio/gpio-crystalcove.c13
-rw-r--r--drivers/gpio/gpio-cs5535.c4
-rw-r--r--drivers/gpio/gpio-da9052.c32
-rw-r--r--drivers/gpio/gpio-da9055.c12
-rw-r--r--drivers/gpio/gpio-davinci.c153
-rw-r--r--drivers/gpio/gpio-dln2.c7
-rw-r--r--drivers/gpio/gpio-ds4520.c6
-rw-r--r--drivers/gpio/gpio-dwapb.c185
-rw-r--r--drivers/gpio/gpio-eic-sprd.c7
-rw-r--r--drivers/gpio/gpio-elkhartlake.c38
-rw-r--r--drivers/gpio/gpio-em.c8
-rw-r--r--drivers/gpio/gpio-en7523.c36
-rw-r--r--drivers/gpio/gpio-ep93xx.c376
-rw-r--r--drivers/gpio/gpio-exar.c22
-rw-r--r--drivers/gpio/gpio-f7188x.c11
-rw-r--r--drivers/gpio/gpio-ftgpio010.c89
-rw-r--r--drivers/gpio/gpio-fxl6408.c15
-rw-r--r--drivers/gpio/gpio-ge.c25
-rw-r--r--drivers/gpio/gpio-gpio-mm.c2
-rw-r--r--drivers/gpio/gpio-graniterapids.c58
-rw-r--r--drivers/gpio/gpio-grgpio.c175
-rw-r--r--drivers/gpio/gpio-gw-pld.c5
-rw-r--r--drivers/gpio/gpio-hisi.c48
-rw-r--r--drivers/gpio/gpio-hlwd.c107
-rw-r--r--drivers/gpio/gpio-htc-egpio.c35
-rw-r--r--drivers/gpio/gpio-i8255.c2
-rw-r--r--drivers/gpio/gpio-ich.c10
-rw-r--r--drivers/gpio/gpio-idio-16.c10
-rw-r--r--drivers/gpio/gpio-idt3243x.c47
-rw-r--r--drivers/gpio/gpio-imx-scu.c45
-rw-r--r--drivers/gpio/gpio-it87.c9
-rw-r--r--drivers/gpio/gpio-ixp4xx.c80
-rw-r--r--drivers/gpio/gpio-janz-ttl.c4
-rw-r--r--drivers/gpio/gpio-kempld.c5
-rw-r--r--drivers/gpio/gpio-latch.c66
-rw-r--r--drivers/gpio/gpio-ljca.c46
-rw-r--r--drivers/gpio/gpio-logicvc.c9
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c308
-rw-r--r--drivers/gpio/gpio-loongson.c6
-rw-r--r--drivers/gpio/gpio-loongson1.c40
-rw-r--r--drivers/gpio/gpio-lp3943.c11
-rw-r--r--drivers/gpio/gpio-lp873x.c10
-rw-r--r--drivers/gpio/gpio-lp87565.c13
-rw-r--r--drivers/gpio/gpio-lpc18xx.c52
-rw-r--r--drivers/gpio/gpio-lpc32xx.c18
-rw-r--r--drivers/gpio/gpio-macsmc.c292
-rw-r--r--drivers/gpio/gpio-madera.c16
-rw-r--r--drivers/gpio/gpio-max3191x.c34
-rw-r--r--drivers/gpio/gpio-max7300.c2
-rw-r--r--drivers/gpio/gpio-max730x.c24
-rw-r--r--drivers/gpio/gpio-max732x.c11
-rw-r--r--drivers/gpio/gpio-max7360.c257
-rw-r--r--drivers/gpio/gpio-max77620.c11
-rw-r--r--drivers/gpio/gpio-max77650.c12
-rw-r--r--drivers/gpio/gpio-max77759.c530
-rw-r--r--drivers/gpio/gpio-mb86s7x.c29
-rw-r--r--drivers/gpio/gpio-mc33880.c10
-rw-r--r--drivers/gpio/gpio-menz127.c111
-rw-r--r--drivers/gpio/gpio-merrifield.c17
-rw-r--r--drivers/gpio/gpio-ml-ioh.c16
-rw-r--r--drivers/gpio/gpio-mlxbf.c25
-rw-r--r--drivers/gpio/gpio-mlxbf2.c93
-rw-r--r--drivers/gpio/gpio-mlxbf3.c117
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c67
-rw-r--r--drivers/gpio/gpio-mmio.c672
-rw-r--r--drivers/gpio/gpio-mockup.c11
-rw-r--r--drivers/gpio/gpio-moxtet.c14
-rw-r--r--drivers/gpio/gpio-mpc5200.c86
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c204
-rw-r--r--drivers/gpio/gpio-mpfs.c191
-rw-r--r--drivers/gpio/gpio-mpsse.c730
-rw-r--r--drivers/gpio/gpio-msc313.c17
-rw-r--r--drivers/gpio/gpio-mt7621.c80
-rw-r--r--drivers/gpio/gpio-mvebu.c46
-rw-r--r--drivers/gpio/gpio-mxc.c109
-rw-r--r--drivers/gpio/gpio-mxs.c35
-rw-r--r--drivers/gpio/gpio-nct6694.c499
-rw-r--r--drivers/gpio/gpio-nomadik.c34
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c8
-rw-r--r--drivers/gpio/gpio-octeon.c5
-rw-r--r--drivers/gpio/gpio-omap.c29
-rw-r--r--drivers/gpio/gpio-palmas.c24
-rw-r--r--drivers/gpio/gpio-pca953x.c264
-rw-r--r--drivers/gpio/gpio-pca9570.c3
-rw-r--r--drivers/gpio/gpio-pcf857x.c43
-rw-r--r--drivers/gpio/gpio-pch.c17
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c20
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c19
-rw-r--r--drivers/gpio/gpio-pisosr.c16
-rw-r--r--drivers/gpio/gpio-pl061.c24
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c7
-rw-r--r--drivers/gpio/gpio-pxa.c21
-rw-r--r--drivers/gpio/gpio-qixis-fpga.c111
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c8
-rw-r--r--drivers/gpio/gpio-rc5t583.c17
-rw-r--r--drivers/gpio/gpio-rcar.c77
-rw-r--r--drivers/gpio/gpio-rda.c35
-rw-r--r--drivers/gpio/gpio-rdc321x.c12
-rw-r--r--drivers/gpio/gpio-realtek-otto.c41
-rw-r--r--drivers/gpio/gpio-reg.c10
-rw-r--r--drivers/gpio/gpio-regmap.c139
-rw-r--r--drivers/gpio/gpio-rockchip.c41
-rw-r--r--drivers/gpio/gpio-rtd.c4
-rw-r--r--drivers/gpio/gpio-sa1100.c19
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c24
-rw-r--r--drivers/gpio/gpio-sch.c7
-rw-r--r--drivers/gpio/gpio-sch311x.c6
-rw-r--r--drivers/gpio/gpio-shared-proxy.c334
-rw-r--r--drivers/gpio/gpio-sifive.c74
-rw-r--r--drivers/gpio/gpio-sim.c283
-rw-r--r--drivers/gpio/gpio-siox.c9
-rw-r--r--drivers/gpio/gpio-sloppy-logic-analyzer.c345
-rw-r--r--drivers/gpio/gpio-sodaville.c24
-rw-r--r--drivers/gpio/gpio-spacemit-k1.c307
-rw-r--r--drivers/gpio/gpio-spear-spics.c19
-rw-r--r--drivers/gpio/gpio-sprd.c6
-rw-r--r--drivers/gpio/gpio-stmpe.c113
-rw-r--r--drivers/gpio/gpio-stp-xway.c18
-rw-r--r--drivers/gpio/gpio-syscon.c59
-rw-r--r--drivers/gpio/gpio-tangier.c6
-rw-r--r--drivers/gpio/gpio-tb10x.c96
-rw-r--r--drivers/gpio/gpio-tc3589x.c9
-rw-r--r--drivers/gpio/gpio-tegra.c13
-rw-r--r--drivers/gpio/gpio-tegra186.c267
-rw-r--r--drivers/gpio/gpio-thunderx.c19
-rw-r--r--drivers/gpio/gpio-timberdale.c17
-rw-r--r--drivers/gpio/gpio-tpic2810.c23
-rw-r--r--drivers/gpio/gpio-tps65086.c14
-rw-r--r--drivers/gpio/gpio-tps65218.c29
-rw-r--r--drivers/gpio/gpio-tps65219.c130
-rw-r--r--drivers/gpio/gpio-tps6586x.c13
-rw-r--r--drivers/gpio/gpio-tps65910.c19
-rw-r--r--drivers/gpio/gpio-tps65912.c15
-rw-r--r--drivers/gpio/gpio-tps68470.c12
-rw-r--r--drivers/gpio/gpio-tqmx86.c221
-rw-r--r--drivers/gpio/gpio-ts4800.c39
-rw-r--r--drivers/gpio/gpio-ts4900.c18
-rw-r--r--drivers/gpio/gpio-ts5500.c6
-rw-r--r--drivers/gpio/gpio-twl4030.c30
-rw-r--r--drivers/gpio/gpio-twl6040.c27
-rw-r--r--drivers/gpio/gpio-uniphier.c28
-rw-r--r--drivers/gpio/gpio-usbio.c248
-rw-r--r--drivers/gpio/gpio-vf610.c108
-rw-r--r--drivers/gpio/gpio-viperboard.c126
-rw-r--r--drivers/gpio/gpio-virtio.c54
-rw-r--r--drivers/gpio/gpio-virtuser.c1800
-rw-r--r--drivers/gpio/gpio-visconti.c32
-rw-r--r--drivers/gpio/gpio-vx855.c7
-rw-r--r--drivers/gpio/gpio-wcd934x.c16
-rw-r--r--drivers/gpio/gpio-wcove.c12
-rw-r--r--drivers/gpio/gpio-winbond.c14
-rw-r--r--drivers/gpio/gpio-wm831x.c19
-rw-r--r--drivers/gpio/gpio-wm8350.c13
-rw-r--r--drivers/gpio/gpio-wm8994.c12
-rw-r--r--drivers/gpio/gpio-xgene-sb.c119
-rw-r--r--drivers/gpio/gpio-xgene.c12
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c38
-rw-r--r--drivers/gpio/gpio-xilinx.c219
-rw-r--r--drivers/gpio/gpio-xlp.c8
-rw-r--r--drivers/gpio/gpio-xra1403.c17
-rw-r--r--drivers/gpio/gpio-xtensa.c11
-rw-r--r--drivers/gpio/gpio-zevio.c10
-rw-r--r--drivers/gpio/gpio-zynq.c34
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c9
-rw-r--r--drivers/gpio/gpiolib-acpi-core.c1427
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c402
-rw-r--r--drivers/gpio/gpiolib-acpi.c1707
-rw-r--r--drivers/gpio/gpiolib-acpi.h15
-rw-r--r--drivers/gpio/gpiolib-cdev.c650
-rw-r--r--drivers/gpio/gpiolib-devres.c211
-rw-r--r--drivers/gpio/gpiolib-legacy.c73
-rw-r--r--drivers/gpio/gpiolib-of.c329
-rw-r--r--drivers/gpio/gpiolib-of.h8
-rw-r--r--drivers/gpio/gpiolib-shared.c656
-rw-r--r--drivers/gpio/gpiolib-shared.h71
-rw-r--r--drivers/gpio/gpiolib-swnode.c69
-rw-r--r--drivers/gpio/gpiolib-sysfs.c856
-rw-r--r--drivers/gpio/gpiolib.c1413
-rw-r--r--drivers/gpio/gpiolib.h130
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig234
-rw-r--r--drivers/gpu/drm/Kconfig.debug117
-rw-r--r--drivers/gpu/drm/Makefile58
-rw-r--r--drivers/gpu/drm/adp/Kconfig17
-rw-r--r--drivers/gpu/drm/adp/Makefile5
-rw-r--r--drivers/gpu/drm/adp/adp-mipi.c277
-rw-r--r--drivers/gpu/drm/adp/adp_drv.c614
-rw-r--r--drivers/gpu/drm/amd/acp/include/acp_gfx_if.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h347
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c170
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c190
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c384
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c249
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c364
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c328
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c180
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c591
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c3227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c806
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c233
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c610
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c241
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c311
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c442
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c1434
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h129
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c676
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c345
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c240
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c275
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c229
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c386
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c1470
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c478
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c820
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c2047
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c717
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c194
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c518
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h189
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c329
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c385
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c395
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c619
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c1482
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c1011
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c868
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c827
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c695
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h141
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c163
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c700
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c367
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h258
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c473
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx12.h121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3804
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c452
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_15.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_15.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c1400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c1858
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c5793
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c213
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c287
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1023
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c1151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c521
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c190
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c1070
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c167
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c525
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.c406
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c377
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c667
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c305
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c1101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c501
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c1189
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c614
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c1942
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c647
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c234
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c141
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h208
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c218
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c481
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c736
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c717
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c687
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c1859
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c567
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h246
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h1964
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c601
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c480
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_14.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_14.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c321
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.c839
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c321
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c427
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c416
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c1332
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c868
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c925
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c1115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c991
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c802
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c1729
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h4097
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm296
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm1136
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c239
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c76
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c76
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c469
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c844
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c77
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c81
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c231
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c106
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c220
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c64
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c139
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c459
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c107
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c177
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c113
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h138
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c294
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c189
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c385
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c346
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c218
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/soc15_int.h1
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c61
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h1
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig19
-rw-r--r--drivers/gpu/drm/amd/display/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4307
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h177
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c858
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c565
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h56
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c246
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c365
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c136
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c248
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c84
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c622
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c315
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c84
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c178
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h29
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile50
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c106
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c113
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h108
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c536
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c1631
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h117
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c472
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_smu14_driver_if.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c2523
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c206
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c3267
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c1007
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c255
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c636
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1259
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c1195
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h222
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h219
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.c148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h234
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c231
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state_priv.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h114
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream_priv.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h256
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/Makefile103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h526
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn302/dcn302_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn303/dcn303_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c878
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c)15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c2486
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h)14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c915
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h249
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c98
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c432
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h428
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c535
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/Makefile13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c755
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/Makefile17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/Makefile20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/Makefile17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/Makefile20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c1086
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c271
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/Makefile117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c)10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c)7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h)7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c)7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c)12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c)42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c391
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h)41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c)74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.c322
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c856
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h240
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c109
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile98
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c803
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c)129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h)116
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c929
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c466
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h135
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h372
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h191
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h526
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h215
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h744
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c661
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c13342
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h2341
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c788
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c785
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c706
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c2390
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c1170
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h189
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h1010
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c)176
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h)23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c)327
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c)24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c704
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h)29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c)12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c429
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h740
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c242
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c1186
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c394
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h346
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/Makefile46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c264
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_translate_dcn401.c335
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_translate_dcn401.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/Makefile50
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c)10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/Makefile104
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c)20
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h)65
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c)30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c)12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c)40
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c)69
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h)17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c1271
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h206
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/Makefile97
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c)33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h)168
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c)96
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h)59
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c678
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h)16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c)17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c)77
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c1097
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h373
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c409
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c433
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c537
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c595
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c189
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c414
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c659
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c4045
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h213
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c177
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h1543
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h93
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h118
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h97
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h822
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h178
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h343
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_service.h350
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c414
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c135
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c110
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c233
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c258
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c306
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c520
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c531
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c135
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c94
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c310
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile54
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/Makefile72
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c)16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c)38
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h)12
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c)383
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h)8
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c635
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h257
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/Makefile51
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c)14
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c)38
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h)10
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c202
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h289
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c135
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c552
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h196
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c)84
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile42
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c)127
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c)29
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c2196
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c2282
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h656
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c1925
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c553
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c2586
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c1233
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h560
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c493
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h522
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h56
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h236
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h2512
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c85
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c114
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c165
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c171
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c678
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h290
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c445
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c35
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h20
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h40
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h11
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h9
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c307
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h11
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c187
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c11
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c37
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c87
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c53
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c99
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c122
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h26
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h175
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c66
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h15
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h4
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h269
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h221
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h6193
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h6193
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h108
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h56
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h90
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h15485
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h61940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h16662
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h145870
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_offset.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_sh_mask.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_12_0_0_offset.h11061
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_12_0_0_sh_mask.h40550
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_4_1_0_offset.h1341
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_4_1_0_sh_mask.h6943
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h48
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h188
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_14_0_2_offset.h228
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_14_0_2_sh_mask.h940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h22
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h39
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h6
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h101
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h70
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h74
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/isp/irqsrcs_isp_4_1.h62
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h47
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h23
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h592
-rw-r--r--drivers/gpu/drm/amd/include/mes_api_def.h570
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h53
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h907
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h91
-rw-r--r--drivers/gpu/drm/amd/include/soc21_enum.h2
-rw-r--r--drivers/gpu/drm/amd/include/soc24_enum.h21073
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h8
-rw-r--r--drivers/gpu/drm/amd/include/v12_structs.h1189
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c419
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c86
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c1550
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h44
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h1
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c163
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c144
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c628
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h557
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c66
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c99
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c447
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c78
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h555
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h36
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c40
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c84
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c119
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c20
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c31
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c574
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c9
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c38
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c28
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c863
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h272
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h145
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h378
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h146
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h148
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h37
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h32
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h52
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c696
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c277
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c1513
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c95
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c145
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c168
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c175
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c314
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c465
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c1073
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c53
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c61
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c1437
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h233
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c387
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c229
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c321
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c1582
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c225
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h128
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h2
-rw-r--r--drivers/gpu/drm/amd/ras/Makefile34
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/Makefile33
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c285
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h54
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c182
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h27
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c648
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h83
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c94
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c125
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c190
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h41
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c279
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h110
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile44
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras.h370
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.c672
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.h164
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c379
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h71
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.c522
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.h426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core.c603
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h37
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.c315
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.h304
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c1339
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h197
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.c70
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.h43
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h259
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c317
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h93
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.c81
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.h50
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c105
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.c96
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.h46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c123
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.c322
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.h53
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.c750
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.h145
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h231
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.c707
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.h166
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c511
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h314
-rw-r--r--drivers/gpu/drm/arm/Kconfig3
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c78
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c10
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c13
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c6
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c24
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c12
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c20
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c4
-rw-r--r--drivers/gpu/drm/armada/Kconfig1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c3
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c1
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h11
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c13
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c133
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c3
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c1
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c8
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c11
-rw-r--r--drivers/gpu/drm/ast/Kconfig3
-rw-r--r--drivers/gpu/drm/ast/Makefile14
-rw-r--r--drivers/gpu/drm/ast/ast_2000.c257
-rw-r--r--drivers/gpu/drm/ast/ast_2100.c480
-rw-r--r--drivers/gpu/drm/ast/ast_2200.c92
-rw-r--r--drivers/gpu/drm/ast/ast_2300.c1463
-rw-r--r--drivers/gpu/drm/ast/ast_2400.c100
-rw-r--r--drivers/gpu/drm/ast/ast_2500.c675
-rw-r--r--drivers/gpu/drm/ast/ast_2600.c116
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c314
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c661
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c241
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h207
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c109
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h289
-rw-r--r--drivers/gpu/drm/ast/ast_main.c306
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c26
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1393
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2024
-rw-r--r--drivers/gpu/drm/ast/ast_post.h50
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h99
-rw-r--r--drivers/gpu/drm/ast/ast_sil164.c126
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h247
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.c241
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.h108
-rw-r--r--drivers/gpu/drm/ast/ast_vga.c126
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c173
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c128
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h133
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c408
-rw-r--r--drivers/gpu/drm/bridge/Kconfig60
-rw-r--r--drivers/gpu/drm/bridge/Makefile8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h61
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c84
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c70
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c474
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c63
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c70
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c602
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h35
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c209
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h9
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c209
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.h10
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c21
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c18
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c444
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c122
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c30
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c23
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c17
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c8
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c33
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c23
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig21
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c16
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c91
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c158
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c20
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c87
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c53
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c57
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c51
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c28
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c26
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c14
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c930
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c545
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c95
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c56
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c20
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c367
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c133
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c14
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c37
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c14
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c32
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.h4
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c24
-rw-r--r--drivers/gpu/drm/bridge/panel.c49
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c10
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c16
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c478
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c119
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c13
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c16
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c53
-rw-r--r--drivers/gpu/drm/bridge/ssd2825.c775
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig29
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-dp.c2097
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c32
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c10
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c7
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c1343
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h848
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c114
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c32
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c1032
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c22
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c298
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c78
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c56
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c (renamed from drivers/gpu/drm/i2c/tda998x_drv.c)85
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c13
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c22
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c306
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c407
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c115
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c24
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c21
-rw-r--r--drivers/gpu/drm/bridge/waveshare-dsi.c203
-rw-r--r--drivers/gpu/drm/ci/arm64.config14
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh41
-rw-r--r--drivers/gpu/drm/ci/build.sh34
-rw-r--r--drivers/gpu/drm/ci/build.yml129
-rw-r--r--drivers/gpu/drm/ci/check-devicetrees.yml50
-rw-r--r--drivers/gpu/drm/ci/container.yml40
-rwxr-xr-xdrivers/gpu/drm/ci/dt-binding-check.sh19
-rwxr-xr-xdrivers/gpu/drm/ci/dtbs-check.sh22
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml312
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh51
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml21
-rwxr-xr-xdrivers/gpu/drm/ci/kunit.sh16
-rw-r--r--drivers/gpu/drm/ci/kunit.yml37
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh104
-rwxr-xr-xdrivers/gpu/drm/ci/setup-llvm-links.sh13
-rw-r--r--drivers/gpu/drm/ci/test.yml274
-rw-r--r--drivers/gpu/drm/ci/testlist.txt2761
-rw-r--r--drivers/gpu/drm/ci/x86_64.config1
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt33
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt40
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt69
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt46
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt34
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt325
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt46
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt149
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt64
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt42
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt50
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt24
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt52
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt55
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt51
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt60
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt212
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt24
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt24
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/requirements.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt63
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt34
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt63
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt47
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt149
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt25
-rwxr-xr-xdrivers/gpu/drm/ci/xfails/update-xfails.py204
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt158
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt118
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt811
-rw-r--r--drivers/gpu/drm/clients/Kconfig123
-rw-r--r--drivers/gpu/drm/clients/Makefile8
-rw-r--r--drivers/gpu/drm/clients/drm_client_internal.h25
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c98
-rw-r--r--drivers/gpu/drm/clients/drm_fbdev_client.c176
-rw-r--r--drivers/gpu/drm/clients/drm_log.c441
-rw-r--r--drivers/gpu/drm/display/Kconfig56
-rw-r--r--drivers/gpu/drm/display/Makefile15
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c887
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c7
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c54
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c8
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c1117
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c508
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c33
-rw-r--r--drivers/gpu/drm/display/drm_dsc_helper.c92
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c195
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_helper.c193
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c65
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c232
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c1180
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_aperture.c192
-rw-r--r--drivers/gpu/drm/drm_atomic.c312
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c522
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c205
-rw-r--r--drivers/gpu/drm/drm_auth.c65
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c411
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c407
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c60
-rw-r--r--drivers/gpu/drm/drm_buddy.c459
-rw-r--r--drivers/gpu/drm/drm_cache.c9
-rw-r--r--drivers/gpu/drm/drm_client.c339
-rw-r--r--drivers/gpu/drm/drm_client_event.c214
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c331
-rw-r--r--drivers/gpu/drm/drm_client_sysrq.c65
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c289
-rw-r--r--drivers/gpu/drm/drm_colorop.c599
-rw-r--r--drivers/gpu/drm/drm_connector.c514
-rw-r--r--drivers/gpu/drm/drm_crtc.c61
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h2
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h20
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c4
-rw-r--r--drivers/gpu/drm/drm_debugfs.c321
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c1
-rw-r--r--drivers/gpu/drm/drm_displayid.c61
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h33
-rw-r--r--drivers/gpu/drm/drm_draw.c157
-rw-r--r--drivers/gpu/drm/drm_draw_internal.h56
-rw-r--r--drivers/gpu/drm/drm_drv.c324
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c171
-rw-r--r--drivers/gpu/drm/drm_edid.c446
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c182
-rw-r--r--drivers/gpu/drm/drm_exec.c5
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c322
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c357
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c349
-rw-r--r--drivers/gpu/drm/drm_fbdev_shmem.c203
-rw-r--r--drivers/gpu/drm/drm_fbdev_ttm.c234
-rw-r--r--drivers/gpu/drm/drm_file.c168
-rw-r--r--drivers/gpu/drm/drm_flip_work.c1
-rw-r--r--drivers/gpu/drm/drm_format_helper.c842
-rw-r--r--drivers/gpu/drm/drm_format_internal.h174
-rw-r--r--drivers/gpu/drm/drm_fourcc.c75
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c64
-rw-r--r--drivers/gpu/drm/drm_gem.c312
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c11
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c14
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c61
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c379
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c136
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c1635
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c691
-rw-r--r--drivers/gpu/drm/drm_internal.h44
-rw-r--r--drivers/gpu/drm/drm_ioctl.c64
-rw-r--r--drivers/gpu/drm/drm_managed.c11
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c95
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c717
-rw-r--r--drivers/gpu/drm/drm_mm.c40
-rw-r--r--drivers/gpu/drm/drm_mode_config.c26
-rw-r--r--drivers/gpu/drm/drm_mode_object.c20
-rw-r--r--drivers/gpu/drm/drm_modes.c20
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c20
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c95
-rw-r--r--drivers/gpu/drm/drm_pagemap.c882
-rw-r--r--drivers/gpu/drm/drm_panel.c287
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c156
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c102
-rw-r--r--drivers/gpu/drm/drm_panic.c946
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs1016
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_plane.c148
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c158
-rw-r--r--drivers/gpu/drm/drm_print.c123
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c1
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c74
-rw-r--r--drivers/gpu/drm/drm_rect.c1
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c1
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c2
-rw-r--r--drivers/gpu/drm/drm_suballoc.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c76
-rw-r--r--drivers/gpu/drm/drm_sysfs.c71
-rw-r--r--drivers/gpu/drm/drm_vblank.c268
-rw-r--r--drivers/gpu/drm/drm_vblank_helper.c176
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c26
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c194
-rw-r--r--drivers/gpu/drm/etnaviv/cmdstream.xml.h52
-rw-r--r--drivers/gpu/drm/etnaviv/common.xml.h12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c37
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c49
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c192
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c32
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c64
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c62
-rw-r--r--drivers/gpu/drm/etnaviv/state.xml.h103
-rw-r--r--drivers/gpu/drm/etnaviv/state_blt.xml.h22
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h25
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c163
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c116
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c3
-rw-r--r--drivers/gpu/drm/exynos/regs-decon7.h15
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c34
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_tcon.c2
-rw-r--r--drivers/gpu/drm/gma500/Kconfig3
-rw-r--r--drivers/gpu/drm/gma500/backlight.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c8
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c8
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c168
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c14
-rw-r--r--drivers/gpu/drm/gma500/gem.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c23
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h4
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c4
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c6
-rw-r--r--drivers/gpu/drm/gma500/mmu.c41
-rw-r--r--drivers/gpu/drm/gma500/mmu.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h16
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c31
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c29
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c37
-rw-r--r--drivers/gpu/drm/gud/Kconfig1
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c33
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c124
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h14
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c91
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig6
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c168
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h69
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h21
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c307
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h64
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c392
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h132
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c71
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c182
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c108
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h33
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c46
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c45
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c17
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c8
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c7
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm.h4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c15
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c218
-rw-r--r--drivers/gpu/drm/i2c/Kconfig36
-rw-r--r--drivers/gpu/drm/i2c/Makefile10
-rw-r--r--drivers/gpu/drm/i915/Kconfig6
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug19
-rw-r--r--drivers/gpu/drm/i915/Makefile71
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c22
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c20
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c26
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c22
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c22
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c456
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.h25
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c236
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.h7
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c150
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.h6
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.c97
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.h14
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c649
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h16
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane_regs.h112
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c1498
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h18
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm_regs.h257
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c640
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c600
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h41
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c1178
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h70
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c625
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio_regs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c559
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c1315
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h214
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c1007
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf_regs.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c2271
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h98
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg_regs.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c1489
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_pipeline.c99
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_pipeline.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_regs.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_colorop.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_colorop.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c187
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c456
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt_regs.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c329
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c113
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c452
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor_regs.h112
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c1331
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h150
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.c295
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c2517
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c158
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h156
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c4153
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h183
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h124
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c684
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c1471
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h284
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c491
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.h38
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c1949
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h92
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_jiffies.h43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h40
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c1314
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h115
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c254
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c1051
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h2934
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h505
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h622
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c1023
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h53
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h493
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.c376
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c3638
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h107
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c181
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c271
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_regs.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c953
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c1533
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c764
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c418
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c626
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1647
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h112
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c60
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c849
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_regs.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c299
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h197
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c105
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.c123
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c844
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h53
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c191
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c971
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c542
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c54
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c570
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c296
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c472
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c201
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h42
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c372
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus_regs.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c1166
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c124
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c272
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h64
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_shim.h137
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c868
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c711
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c689
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c314
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c164
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.c2327
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy_regs.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c245
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c425
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c117
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c339
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c237
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c470
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h56
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c343
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c321
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c731
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit_regs.h79
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c151
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc_regs.h152
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.c1766
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.h91
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c155
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c311
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c1025
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c2874
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h39
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h118
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.c92
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi_regs.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c381
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c366
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c130
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c431
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_regs.h242
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c636
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_tdf.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c248
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c382
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h800
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c449
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c807
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h126
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c202
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h19
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.c157
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.h46
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c980
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h48
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c1601
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.h16
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane_regs.h585
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c2290
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h81
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h135
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.c88
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.h38
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c295
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h13
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c211
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.c50
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.h156
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c117
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c99
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c87
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c48
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c103
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c137
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c180
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c120
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c27
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c88
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c38
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c33
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c81
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c89
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h146
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c75
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c90
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c124
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c60
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c109
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c110
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c19
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c59
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c8
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/README6
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c10
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c45
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c27
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c30
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c25
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c117
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c169
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c40
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c98
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h55
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c44
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h12
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c145
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c78
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h23
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c285
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c316
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_active.c25
-rw-r--r--drivers/gpu/drm/i915/i915_active.h1
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/i915_config.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c34
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c505
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h4
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h180
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c210
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h17
-rw-r--r--drivers/gpu/drm/i915/i915_gtt_view_types.h59
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c128
-rw-r--r--drivers/gpu/drm/i915/i915_iosf_mbi.h6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c716
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h42
-rw-r--r--drivers/gpu/drm/i915/i915_jiffies.h16
-rw-r--r--drivers/gpu/drm/i915/i915_list_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c16
-rw-r--r--drivers/gpu/drm/i915/i915_mm.h3
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.c18
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.h19
-rw-r--r--drivers/gpu/drm/i915/i915_module.c14
-rw-r--r--drivers/gpu/drm/i915/i915_params.c8
-rw-r--r--drivers/gpu/drm/i915/i915_params.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c169
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c135
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c221
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h17
-rw-r--r--drivers/gpu/drm/i915/i915_ptr_util.h66
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4203
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h136
-rw-r--r--drivers/gpu/drm/i915/i915_request.c27
-rw-r--r--drivers/gpu/drm/i915/i915_request.h9
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.c8
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c124
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c21
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c11
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c10
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.c36
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h30
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c82
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h267
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c60
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h34
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h52
-rw-r--r--drivers/gpu/drm/i915/i915_wait_util.h119
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c55
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.c44
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.h13
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c129
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h13
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c78
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c366
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h4
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c24
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h8
-rw-r--r--drivers/gpu/drm/i915/intel_pci_config.h2
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c32
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h15
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c91
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h20
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c74
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h23
-rw-r--r--drivers/gpu/drm/i915/intel_step.c86
-rw-r--r--drivers/gpu/drm/i915/intel_step.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c58
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h15
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.h49
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c23
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h29
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c18
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_huc.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c29
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c35
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c24
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c387
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h33
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c8
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c287
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h89
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.c161
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.h25
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.c231
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.h37
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb_reg.h180
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c251
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h122
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband_reg.h180
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c6
-rw-r--r--drivers/gpu/drm/imagination/Kconfig4
-rw-r--r--drivers/gpu/drm/imagination/Makefile4
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.c51
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.h21
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c137
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h77
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c50
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.h1
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c68
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c34
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c36
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c34
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c318
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c53
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c152
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h4
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dc/Kconfig13
-rw-r--r--drivers/gpu/drm/imx/dc/Makefile7
-rw-r--r--drivers/gpu/drm/imx/dc/dc-cf.c172
-rw-r--r--drivers/gpu/drm/imx/dc/dc-crtc.c555
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.c177
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.h59
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.c293
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.h102
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ed.c288
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fg.c376
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fl.c185
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.c258
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.h129
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fw.c222
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ic.c282
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.c143
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.h131
-rw-r--r--drivers/gpu/drm/imx/dc/dc-lb.c325
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.c158
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.h101
-rw-r--r--drivers/gpu/drm/imx/dc/dc-plane.c224
-rw-r--r--drivers/gpu/drm/imx/dc/dc-tc.c141
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig5
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-crtc.c6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c2
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dtg.c4
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c5
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-scaler.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Kconfig14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c45
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm.h14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c206
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c44
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c8
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c165
-rw-r--r--drivers/gpu/drm/imx/lcdc/Kconfig3
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c10
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c36
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c6
-rw-r--r--drivers/gpu/drm/kmb/Kconfig3
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c8
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h1
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c4
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lib/drm_random.h2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c8
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c8
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c36
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h8
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig1
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c19
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.c4
-rw-r--r--drivers/gpu/drm/loongson/Kconfig4
-rw-r--r--drivers/gpu/drm/loongson/lsdc_benchmark.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_crtc.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_debugfs.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c23
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c32
-rw-r--r--drivers/gpu/drm/loongson/lsdc_i2c.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_irq.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a1000.c16
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a2000.c16
-rw-r--r--drivers/gpu/drm/loongson/lsdc_pixpll.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c6
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c12
-rw-r--r--drivers/gpu/drm/mcde/Kconfig1
-rw-r--r--drivers/gpu/drm/mcde/mcde_clk_div.c13
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c8
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c12
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig36
-rw-r--r--drivers/gpu/drm/mediatek/Makefile11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c37
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c142
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c145
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h29
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c26
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c31
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c292
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c76
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c46
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c219
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp_reg.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c475
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c351
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c211
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c66
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c840
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_common.c456
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_common.h198
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c396
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h263
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_v2.c1521
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_padding.c23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c75
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h6
-rw-r--r--drivers/gpu/drm/meson/Kconfig5
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c52
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h2
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c16
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c18
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c18
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c47
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c1
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c18
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c226
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h13
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig21
-rw-r--r--drivers/gpu/drm/mgag200/Makefile7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_bmc.c14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c178
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.h11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c42
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h84
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c47
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c47
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c47
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh5.c205
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c49
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c49
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c129
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c237
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga.c73
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga_bmc.c157
-rw-r--r--drivers/gpu/drm/msm/Kconfig41
-rw-r--r--drivers/gpu/drm/msm/Makefile38
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c48
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c77
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c88
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c83
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c48
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c75
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c153
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c165
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c45
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c1961
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c883
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h61
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2096
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h221
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c142
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h117
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c288
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h43
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c477
-rw-r--r--drivers/gpu/drm/msm/adreno/a8xx_gpu.c1201
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c733
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h435
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h340
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h506
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c261
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h198
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h86
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h494
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h541
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h205
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h184
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h212
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h328
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h54
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h79
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h317
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h254
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h453
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h408
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h46
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c198
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c566
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c757
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h134
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h100
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c52
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c63
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c90
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c246
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c267
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h144
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c215
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h52
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c31
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c52
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c33
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c231
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c69
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c236
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h67
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c970
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h52
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c531
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h82
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c92
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c52
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c88
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c91
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c38
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c27
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c2
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c2
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.h13
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c51
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c592
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h57
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c331
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h29
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1341
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h128
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c1096
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h55
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c73
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h10
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c1126
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c185
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h35
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c566
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h53
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c594
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h57
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.c28
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c34
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c329
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c49
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c69
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h16
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c297
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c274
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c109
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c345
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c238
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c582
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c265
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h67
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c199
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c393
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c93
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c13
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c765
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c12
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c17
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c138
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c393
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h155
-rw-r--r--drivers/gpu/drm/msm/msm_dsc_helper.h11
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c51
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c157
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c626
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h302
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c71
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c104
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c413
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c1609
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c254
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h199
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h54
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c398
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c103
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h64
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c356
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h28
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h44
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c1
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c63
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c29
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h18
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c99
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.c172
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.h37
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml5172
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml158
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml429
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml281
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml600
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml216
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml1030
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml121
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml299
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_common.xml12
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml733
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml28
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml37
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml91
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdp5.xml16
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdss.xml38
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py217
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig2
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c11
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c10
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c30
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c1
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c75
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c19
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c (renamed from drivers/gpu/drm/i2c/ch7006_drv.c)34
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c (renamed from drivers/gpu/drm/i2c/ch7006_mode.c)8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h (renamed from drivers/gpu/drm/i2c/ch7006_priv.h)11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c (renamed from drivers/gpu/drm/i2c/sil164_drv.c)37
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c145
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc907d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc37d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c152
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/tile.h63
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c182
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c242
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c98
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h87
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h220
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h37
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h59
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/log.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h163
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h262
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c337
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c143
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo0039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo5039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo74c1.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo85b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo9039.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo90b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_boa0b5.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c309
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c94
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c336
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c462
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c131
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c63
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c194
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c116
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c25
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c17
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c159
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c76
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/enum.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/uevent.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c669
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c1714
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c665
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c867
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h105
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/fw.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c320
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c2393
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c1793
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c327
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c617
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c2205
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h225
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c698
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c267
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c333
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig16
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs71
-rw-r--r--drivers/gpu/drm/nova/file.rs69
-rw-r--r--drivers/gpu/drm/nova/gem.rs47
-rw-r--r--drivers/gpu/drm/nova/nova.rs17
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c150
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c16
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c21
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c176
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c37
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig276
-rw-r--r--drivers/gpu/drm/panel/Makefile24
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c151
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c51
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c336
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c241
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c2703
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c78
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c405
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c118
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c11
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c1088
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c307
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112b.c430
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c586
-rw-r--r--drivers/gpu/drm/panel/panel-hydis-hv101hd1.c188
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c236
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c565
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c1770
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c895
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c12
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c343
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c1416
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c159
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c204
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c46
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c56
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c62
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c384
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c39
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lg-ld070wx3.c184
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c86
-rw-r--r--drivers/gpu/drm/panel/panel-lincolntech-lcd197.c261
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c16
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c11
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c90
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c10
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c387
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c364
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c29
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c210
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c225
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1704
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c39
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c676
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c11
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c55
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c19
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c16
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c53
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c59
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c14
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c36
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c493
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm692e5.c256
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c103
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c325
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c281
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c19
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams581vf01.c283
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams639rq08.c329
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c47
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c250
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c81
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c385
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c20
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c342
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c21
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c768
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c102
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c981
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c204
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c62
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c225
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c76
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c12
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c65
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c70
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c958
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c1251
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c884
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c37
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c21
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c118
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c457
-rw-r--r--drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c115
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c134
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c81
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-tddi.c277
-rw-r--r--drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c23
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c11
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c190
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c333
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c442
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c223
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c11
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c216
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c13
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c142
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h58
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c432
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c197
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c83
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c392
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h38
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c259
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c36
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h38
-rw-r--r--drivers/gpu/drm/panthor/Makefile2
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c97
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h6
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c177
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h195
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c355
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c341
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h38
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c295
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h104
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c256
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h13
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c97
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.c224
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.h56
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c459
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_pwr.c549
-rw-r--r--drivers/gpu/drm/panthor/panthor_pwr.h23
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h188
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c1047
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h13
-rw-r--r--drivers/gpu/drm/pl111/Kconfig1
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c14
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c5
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig4
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c52
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c70
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/Kconfig4
-rw-r--r--drivers/gpu/drm/radeon/atom.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c19
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c18
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/cik.c56
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c653
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c5
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/pptable.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c309
-rw-r--r--drivers/gpu/drm/radeon/r200.c34
-rw-r--r--drivers/gpu/drm/radeon/r300.c75
-rw-r--r--drivers/gpu/drm/radeon/r420.c6
-rw-r--r--drivers/gpu/drm/radeon/r520.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c12
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c453
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c81
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c48
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c90
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c157
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c155
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c59
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c48
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c10
-rw-r--r--drivers/gpu/drm/radeon/rs400.c24
-rw-r--r--drivers/gpu/drm/radeon/rs600.c14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c6
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Kconfig13
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Makefile2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c7
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c28
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c24
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c20
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c14
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c22
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c301
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h343
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c813
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h151
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig18
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Makefile2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c34
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c62
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c132
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c1084
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h203
-rw-r--r--drivers/gpu/drm/renesas/shmobile/Kconfig3
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c18
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c14
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig39
-rw-r--r--drivers/gpu/drm/rockchip/Makefile3
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c273
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c325
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h10
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.h4
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c181
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c508
-rw-r--r--drivers/gpu/drm/rockchip/dw_dp-rockchip.c150
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c343
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c669
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c649
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h349
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c320
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c63
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c15
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c30
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c2072
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h341
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c147
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h23
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c2137
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c101
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h2
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h109
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c201
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c8
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h91
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c510
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c370
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h224
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c563
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig42
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1083
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)9
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)8
-rw-r--r--drivers/gpu/drm/solomon/Kconfig1
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-i2c.c2
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c12
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c103
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c15
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c4
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c17
-rw-r--r--drivers/gpu/drm/sti/Kconfig3
-rw-r--r--drivers/gpu/drm/sti/Makefile2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c16
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c4
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c27
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c49
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c60
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c76
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c23
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c17
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c22
-rw-r--r--drivers/gpu/drm/stm/Kconfig15
-rw-r--r--drivers/gpu/drm/stm/Makefile2
-rw-r--r--drivers/gpu/drm/stm/drv.c32
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c283
-rw-r--r--drivers/gpu/drm/stm/ltdc.c309
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/stm/lvds.c1222
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c107
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c113
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h16
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c386
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h96
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c255
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.h25
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c318
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h25
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.h6
-rw-r--r--drivers/gpu/drm/sun4i/sunxi_engine.h13
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h218
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c608
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c104
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c390
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c1145
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c885
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c652
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c26
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c13
-rw-r--r--drivers/gpu/drm/tegra/drm.c30
-rw-r--r--drivers/gpu/drm/tegra/drm.h17
-rw-r--r--drivers/gpu/drm/tegra/dsi.c73
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c113
-rw-r--r--drivers/gpu/drm/tegra/gem.c74
-rw-r--r--drivers/gpu/drm/tegra/gem.h21
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c2
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c41
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c11
-rw-r--r--drivers/gpu/drm/tegra/hub.c14
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c8
-rw-r--r--drivers/gpu/drm/tegra/nvjpg.c330
-rw-r--r--drivers/gpu/drm/tegra/output.c29
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c13
-rw-r--r--drivers/gpu/drm/tegra/uapi.c7
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/tests/.kunitconfig2
-rw-r--r--drivers/gpu/drm/tests/Makefile8
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_state_test.c379
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c521
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c178
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c12
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c22
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c1576
-rw-r--r--drivers/gpu/drm/tests/drm_damage_helper_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c18
-rw-r--r--drivers/gpu/drm/tests/drm_exec_test.c23
-rw-r--r--drivers/gpu/drm/tests/drm_fixp_test.c71
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c334
-rw-r--r--drivers/gpu/drm/tests/drm_format_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c377
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c70
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c2334
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h864
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c167
-rw-r--r--drivers/gpu/drm/tests/drm_managed_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c2
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c58
-rw-r--r--drivers/gpu/drm/tests/drm_plane_helper_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c9
-rw-r--r--drivers/gpu/drm/tests/drm_rect_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/Kconfig3
-rw-r--r--drivers/gpu/drm/tidss/Makefile3
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c53
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c735
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h29
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h107
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c44
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h14
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c13
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c34
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.c619
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.h43
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c34
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h2
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c9
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig107
-rw-r--r--drivers/gpu/drm/tiny/Makefile9
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c834
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c14
-rw-r--r--drivers/gpu/drm/tiny/bochs.c455
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c675
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c763
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c69
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c8
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c8
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c8
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c8
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c9
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c1410
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c68
-rw-r--r--drivers/gpu/drm/tiny/pixpaper.c1166
-rw-r--r--drivers/gpu/drm/tiny/repaper.c28
-rw-r--r--drivers/gpu/drm/tiny/sharp-memory.c669
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c1076
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/tests/.kunitconfig1
-rw-r--r--drivers/gpu/drm/ttm/tests/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/tests/TODO27
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c119
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c1176
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_device_test.c36
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c181
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h18
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c238
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.h30
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c33
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c36
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_tt_test.c169
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c182
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c656
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h60
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c438
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c66
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c788
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool_internal.h25
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c360
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c101
-rw-r--r--drivers/gpu/drm/tve200/Kconfig1
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c12
-rw-r--r--drivers/gpu/drm/tyr/Kconfig19
-rw-r--r--drivers/gpu/drm/tyr/Makefile3
-rw-r--r--drivers/gpu/drm/tyr/driver.rs205
-rw-r--r--drivers/gpu/drm/tyr/file.rs56
-rw-r--r--drivers/gpu/drm/tyr/gem.rs18
-rw-r--r--drivers/gpu/drm/tyr/gpu.rs219
-rw-r--r--drivers/gpu/drm/tyr/regs.rs108
-rw-r--r--drivers/gpu/drm/tyr/tyr.rs22
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/Makefile8
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c32
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h33
-rw-r--r--drivers/gpu/drm/udl/udl_edid.c81
-rw-r--r--drivers/gpu/drm/udl/udl_edid.h15
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c160
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c8
-rw-r--r--drivers/gpu/drm/v3d/Makefile3
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c37
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c131
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c141
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h113
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c47
-rw-r--r--drivers/gpu/drm/v3d/v3d_gemfs.c62
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c161
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c93
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c308
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h33
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h55
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c392
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c314
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig1
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c47
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c12
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c25
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c9
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h4
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig5
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c28
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.h9
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_plane.c44
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c294
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c47
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c160
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h110
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c81
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c219
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1178
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h58
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c646
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h222
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c1065
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c119
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c35
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c1153
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h298
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c96
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c38
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c29
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c44
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c32
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c21
-rw-r--r--drivers/gpu/drm/virtio/Kconfig1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c56
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c66
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c30
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c30
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c240
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c183
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_submit.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_trace.h2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c204
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c9
-rw-r--r--drivers/gpu/drm/vkms/Kconfig17
-rw-r--r--drivers/gpu/drm/vkms/Makefile8
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile8
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_color_test.c414
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c1029
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c279
-rw-r--r--drivers/gpu/drm/vkms/vkms_colorop.c120
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c453
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.h28
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c649
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h489
-rw-r--r--drivers/gpu/drm/vkms/vkms_configfs.c843
-rw-r--r--drivers/gpu/drm/vkms/vkms_configfs.h8
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c96
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h35
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c134
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c133
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h232
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c943
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h13
-rw-r--r--drivers/gpu/drm/vkms/vkms_luts.c811
-rw-r--r--drivers/gpu/drm/vkms/vkms_luts.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c191
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c70
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig5
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmw_surface_cache.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c120
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c158
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c858
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h82
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c97
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h110
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c51
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c523
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c88
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1308
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h92
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c175
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h196
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h185
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c75
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c37
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c260
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c337
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c51
-rw-r--r--drivers/gpu/drm/xe/Kconfig62
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug30
-rw-r--r--drivers/gpu/drm/xe/Kconfig.profile1
-rw-r--r--drivers/gpu/drm/xe/Makefile178
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h41
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h74
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h30
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h289
-rw-r--r--drivers/gpu/drm/xe/abi/guc_capture_abi.h186
-rw-r--r--drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h70
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h118
-rw-r--r--drivers/gpu/drm/xe/abi/guc_log_abi.h75
-rw-r--r--drivers/gpu/drm/xe/abi/guc_messages_abi.h41
-rw-r--r--drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h170
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h61
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h40
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_gt_types.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/intel_gt_types.h)0
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h166
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h87
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h74
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h16
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_step.h10
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h105
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h)0
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h4
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h23
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h6
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c70
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_utils.c26
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c103
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c41
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.h24
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c103
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.h21
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c532
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h33
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c74
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.h11
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c19
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c28
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c289
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c149
-rw-r--r--drivers/gpu/drm/xe/display/xe_panic.c102
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c90
-rw-r--r--drivers/gpu/drm/xe/display/xe_stolen.c123
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c15
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gpu_commands.h (renamed from drivers/gpu/drm/xe/regs/xe_gpu_commands.h)7
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mfx_commands.h28
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h12
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h39
-rw-r--r--drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h29
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h10
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h246
-rw-r--r--drivers/gpu/drm/xe/regs/xe_guc_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_hw_error_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h96
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h17
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h14
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h103
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h35
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pxp_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h32
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h37
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h20
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile7
-rw-r--r--drivers/gpu/drm/xe/tests/xe_args_test.c221
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c357
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c84
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c208
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c333
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c776
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.c39
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c17
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c608
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c82
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c375
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c34
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h22
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c285
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test_mod.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c89
-rw-r--r--drivers/gpu/drm/xe/xe_args.h143
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h20
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c44
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h5
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c2181
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h212
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c406
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h61
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c1291
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h47
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c299
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.h4
-rw-r--r--drivers/gpu/drm/xe/xe_dep_job_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.c143
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.h21
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c480
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h13
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_device.c900
-rw-r--r--drivers/gpu/drm/xe/xe_device.h134
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c249
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h486
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules5
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c128
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c238
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h3
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c989
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.h25
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c144
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c710
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h50
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h106
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c101
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c220
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.h36
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake_types.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c75
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c947
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h55
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h66
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h55
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c157
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_debugfs.c71
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c179
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c705
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h94
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c38
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c96
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c308
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c157
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c257
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c243
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c660
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h56
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c231
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h23
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c1355
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h36
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c1974
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h137
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c622
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c1069
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h54
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_monitor.c147
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_monitor.h27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_monitor_types.h22
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c75
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c425
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service_types.h52
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h31
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_printk.h7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c1377
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h43
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c72
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h81
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c101
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c270
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.h17
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c249
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c459
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c166
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h226
-rw-r--r--drivers/gpu/drm/xe/xe_guard.h119
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c1078
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h28
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c446
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c207
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.h49
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c2044
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.h61
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h70
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c1157
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h29
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h48
-rw-r--r--drivers/gpu/drm/xe/xe_guc_db_mgr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c139
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c521
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h102
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h21
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h85
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c97
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.c14
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h71
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_thresholds_set_types.h68
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c315
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.c95
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c876
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c38
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c1729
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h16
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c242
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h48
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c75
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h13
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c253
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h11
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c77
-rw-r--r--drivers/gpu/drm/xe/xe_huc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c472
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h20
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c139
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c344
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.h29
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h60
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.c182
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.h15
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c72
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.h7
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c852
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.h4
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c372
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h68
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c557
-rw-r--r--drivers/gpu/drm/xe/xe_irq.h9
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.c464
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.h17
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c99
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c1182
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h116
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_macros.h12
-rw-r--r--drivers/gpu/drm/xe/xe_map.h4
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c276
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.h8
-rw-r--r--drivers/gpu/drm/xe/xe_memirq_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c1656
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h96
-rw-r--r--drivers/gpu/drm/xe/xe_migrate_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c665
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h62
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.c226
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.h20
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c395
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.h5
-rw-r--r--drivers/gpu/drm/xe/xe_module.c101
-rw-r--r--drivers/gpu/drm/xe/xe_module.h4
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c170
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c2816
-rw-r--r--drivers/gpu/drm/xe/xe_oa.h25
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h271
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c106
-rw-r--r--drivers/gpu/drm/xe/xe_observation.h20
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.c444
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault_types.h136
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c271
-rw-r--r--drivers/gpu/drm/xe/xe_pat.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c874
-rw-r--r--drivers/gpu/drm/xe/xe_pci.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c266
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.h21
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h68
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c193
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h29
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h46
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c456
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h32
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c600
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.h18
-rw-r--r--drivers/gpu/drm/xe/xe_pmu_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c40
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_printk.h129
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.c294
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c2011
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h20
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c949
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.h35
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.c129
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c602
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.h22
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_types.h135
-rw-r--r--drivers/gpu/drm/xe/xe_query.c311
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.h4
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c104
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.h4
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c75
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h126
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c165
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c158
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h106
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_helpers.h34
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c96
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h46
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c223
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c306
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c41
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h7
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.c520
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c181
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.c279
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.h22
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c395
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h18
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h29
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.c365
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.c438
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c647
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h70
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_printk.h12
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c211
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h20
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c480
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.h35
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h47
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vfio.c80
-rw-r--r--drivers/gpu/drm/xe/xe_step.c59
-rw-r--r--drivers/gpu/drm/xe/xe_step_types.h30
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c377
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h18
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h43
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c1537
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h389
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c157
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h5
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c98
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h8
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.c142
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_tile_printk.h127
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c253
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h15
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_printk.h33
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c350
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c16
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.c433
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h46
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.c285
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.h34
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_types.h130
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h443
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h263
-rw-r--r--drivers/gpu/drm/xe/xe_trace_guc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace_guc.h159
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h52
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c98
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h2
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c9
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c92
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c153
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h3
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c132
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h12
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c207
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.h11
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h130
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_uc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.c322
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.h107
-rw-r--r--drivers/gpu/drm/xe/xe_validation.c278
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h192
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c3435
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h172
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h56
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.c431
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h311
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c462
-rw-r--r--drivers/gpu/drm/xe/xe_vram.h25
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c14
-rw-r--r--drivers/gpu/drm/xe/xe_vram_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c225
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h15
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c414
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h37
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules55
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.c12
-rw-r--r--drivers/gpu/drm/xe/xe_wopcm.c15
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c7
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c4
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig12
-rw-r--r--drivers/gpu/drm/xlnx/Makefile1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c89
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp_regs.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c942
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c448
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c45
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h17
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c43
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.h4
-rw-r--r--drivers/gpu/host1x/bus.c25
-rw-r--r--drivers/gpu/host1x/cdma.c7
-rw-r--r--drivers/gpu/host1x/context.c1
-rw-r--r--drivers/gpu/host1x/context_bus.c2
-rw-r--r--drivers/gpu/host1x/debug.c9
-rw-r--r--drivers/gpu/host1x/debug.h1
-rw-r--r--drivers/gpu/host1x/dev.c205
-rw-r--r--drivers/gpu/host1x/dev.h11
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c12
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c106
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c15
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c91
-rw-r--r--drivers/gpu/host1x/intr.c23
-rw-r--r--drivers/gpu/host1x/intr.h5
-rw-r--r--drivers/gpu/host1x/mipi.c17
-rw-r--r--drivers/gpu/host1x/syncpt.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c50
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c23
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c108
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c73
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c48
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c134
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h4
-rw-r--r--drivers/gpu/ipu-v3/ipu-vdi.c11
-rw-r--r--drivers/gpu/nova-core/Kconfig16
-rw-r--r--drivers/gpu/nova-core/Makefile3
-rw-r--r--drivers/gpu/nova-core/bitfield.rs330
-rw-r--r--drivers/gpu/nova-core/dma.rs54
-rw-r--r--drivers/gpu/nova-core/driver.rs104
-rw-r--r--drivers/gpu/nova-core/falcon.rs664
-rw-r--r--drivers/gpu/nova-core/falcon/gsp.rs57
-rw-r--r--drivers/gpu/nova-core/falcon/hal.rs60
-rw-r--r--drivers/gpu/nova-core/falcon/hal/ga102.rs120
-rw-r--r--drivers/gpu/nova-core/falcon/sec2.rs25
-rw-r--r--drivers/gpu/nova-core/fb.rs217
-rw-r--r--drivers/gpu/nova-core/fb/hal.rs41
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga100.rs63
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga102.rs38
-rw-r--r--drivers/gpu/nova-core/fb/hal/tu102.rs59
-rw-r--r--drivers/gpu/nova-core/firmware.rs244
-rw-r--r--drivers/gpu/nova-core/firmware/booter.rs401
-rw-r--r--drivers/gpu/nova-core/firmware/fwsec.rs438
-rw-r--r--drivers/gpu/nova-core/firmware/gsp.rs258
-rw-r--r--drivers/gpu/nova-core/firmware/riscv.rs95
-rw-r--r--drivers/gpu/nova-core/gfw.rs71
-rw-r--r--drivers/gpu/nova-core/gpu.rs302
-rw-r--r--drivers/gpu/nova-core/gsp.rs161
-rw-r--r--drivers/gpu/nova-core/gsp/boot.rs252
-rw-r--r--drivers/gpu/nova-core/gsp/cmdq.rs679
-rw-r--r--drivers/gpu/nova-core/gsp/commands.rs227
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs928
-rw-r--r--drivers/gpu/nova-core/gsp/fw/commands.rs128
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144.rs31
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs951
-rw-r--r--drivers/gpu/nova-core/gsp/sequencer.rs407
-rw-r--r--drivers/gpu/nova-core/nova_core.rs33
-rw-r--r--drivers/gpu/nova-core/num.rs217
-rw-r--r--drivers/gpu/nova-core/regs.rs411
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs721
-rw-r--r--drivers/gpu/nova-core/sbuffer.rs227
-rw-r--r--drivers/gpu/nova-core/util.rs16
-rw-r--r--drivers/gpu/nova-core/vbios.rs1097
-rw-r--r--drivers/gpu/trace/Kconfig11
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c5
-rw-r--r--drivers/greybus/Kconfig2
-rw-r--r--drivers/greybus/core.c5
-rw-r--r--drivers/greybus/es2.c3
-rw-r--r--drivers/greybus/gb-beagleplay.c651
-rw-r--r--drivers/greybus/interface.c3
-rw-r--r--drivers/greybus/operation.c7
-rw-r--r--drivers/greybus/svc.c3
-rw-r--r--drivers/hid/Kconfig97
-rw-r--r--drivers/hid/Makefile17
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c67
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h5
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c4
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h4
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c70
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h1
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c84
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h3
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c40
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h24
-rw-r--r--drivers/hid/bpf/Kconfig2
-rw-r--r--drivers/hid/bpf/Makefile2
-rw-r--r--drivers/hid/bpf/entrypoints/Makefile93
-rw-r--r--drivers/hid/bpf/entrypoints/README4
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.bpf.c25
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.lskel.h248
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c452
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.h13
-rw-r--r--drivers/hid/bpf/hid_bpf_jmp_table.c565
-rw-r--r--drivers/hid/bpf/hid_bpf_struct_ops.c326
-rw-r--r--drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c9
-rw-r--r--drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Huion__Dial-2.bpf.c636
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c563
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c579
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c90
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c1395
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c724
-rw-r--r--drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c531
-rw-r--r--drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c86
-rw-r--r--drivers/hid/bpf/progs/Makefile2
-rw-r--r--drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c133
-rw-r--r--drivers/hid/bpf/progs/Microsoft__Xbox-Elite-2.bpf.c136
-rw-r--r--drivers/hid/bpf/progs/Mistel__MD770.bpf.c154
-rw-r--r--drivers/hid/bpf/progs/Rapoo__M50-Plus-Silent.bpf.c148
-rw-r--r--drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c47
-rw-r--r--drivers/hid/bpf/progs/Thrustmaster__TCA-Yoke-Boeing.bpf.c144
-rw-r--r--drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c321
-rw-r--r--drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/XPPen__ACK05.bpf.c331
-rw-r--r--drivers/hid/bpf/progs/XPPen__Artist24.bpf.c12
-rw-r--r--drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c68
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c305
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco02.bpf.c359
-rw-r--r--drivers/hid/bpf/progs/XPPen__DecoMini4.bpf.c231
-rw-r--r--drivers/hid/bpf/progs/hid_bpf.h6
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_async.h219
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_helpers.h20
-rw-r--r--drivers/hid/bpf/progs/hid_report_helpers.h2982
-rw-r--r--drivers/hid/hid-a4tech.c1
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-apple.c336
-rw-r--r--drivers/hid/hid-appleir.c6
-rw-r--r--drivers/hid/hid-appletb-bl.c204
-rw-r--r--drivers/hid/hid-appletb-kbd.c519
-rw-r--r--drivers/hid/hid-asus.c171
-rw-r--r--drivers/hid/hid-aureal.c3
-rw-r--r--drivers/hid/hid-belkin.c1
-rw-r--r--drivers/hid/hid-betopff.c1
-rw-r--r--drivers/hid/hid-bigbenff.c7
-rw-r--r--drivers/hid/hid-cherry.c3
-rw-r--r--drivers/hid/hid-chicony.c5
-rw-r--r--drivers/hid/hid-cmedia.c6
-rw-r--r--drivers/hid/hid-core.c464
-rw-r--r--drivers/hid/hid-corsair-void.c832
-rw-r--r--drivers/hid/hid-corsair.c4
-rw-r--r--drivers/hid/hid-cougar.c6
-rw-r--r--drivers/hid/hid-cp2112.c107
-rw-r--r--drivers/hid/hid-cypress.c3
-rw-r--r--drivers/hid/hid-debug.c23
-rw-r--r--drivers/hid/hid-dr.c9
-rw-r--r--drivers/hid/hid-elecom.c17
-rw-r--r--drivers/hid/hid-elo.c1
-rw-r--r--drivers/hid/hid-emsff.c1
-rw-r--r--drivers/hid/hid-evision.c22
-rw-r--r--drivers/hid/hid-ezkey.c1
-rw-r--r--drivers/hid/hid-gaff.c1
-rw-r--r--drivers/hid/hid-gembird.c2
-rw-r--r--drivers/hid/hid-generic.c14
-rw-r--r--drivers/hid/hid-glorious.c2
-rw-r--r--drivers/hid/hid-goodix-spi.c818
-rw-r--r--drivers/hid/hid-google-hammer.c35
-rw-r--r--drivers/hid/hid-google-stadiaff.c1
-rw-r--r--drivers/hid/hid-gyration.c1
-rw-r--r--drivers/hid/hid-haptic.c580
-rw-r--r--drivers/hid/hid-haptic.h127
-rw-r--r--drivers/hid/hid-holtek-kbd.c7
-rw-r--r--drivers/hid/hid-holtek-mouse.c5
-rw-r--r--drivers/hid/hid-hyperv.c62
-rw-r--r--drivers/hid/hid-ids.h130
-rw-r--r--drivers/hid/hid-input-test.c10
-rw-r--r--drivers/hid/hid-input.c173
-rw-r--r--drivers/hid/hid-ite.c3
-rw-r--r--drivers/hid/hid-kensington.c3
-rw-r--r--drivers/hid/hid-keytouch.c9
-rw-r--r--drivers/hid/hid-kye.c5
-rw-r--r--drivers/hid/hid-kysona.c290
-rw-r--r--drivers/hid/hid-lcpower.c1
-rw-r--r--drivers/hid/hid-lenovo.c147
-rw-r--r--drivers/hid/hid-letsketch.c6
-rw-r--r--drivers/hid/hid-lg-g15.c608
-rw-r--r--drivers/hid/hid-lg.c31
-rw-r--r--drivers/hid/hid-lg4ff.c15
-rw-r--r--drivers/hid/hid-logitech-dj.c197
-rw-r--r--drivers/hid/hid-logitech-hidpp.c116
-rw-r--r--drivers/hid/hid-macally.c4
-rw-r--r--drivers/hid/hid-magicmouse.c181
-rw-r--r--drivers/hid/hid-maltron.c9
-rw-r--r--drivers/hid/hid-mcp2200.c19
-rw-r--r--drivers/hid/hid-mcp2221.c122
-rw-r--r--drivers/hid/hid-megaworld.c1
-rw-r--r--drivers/hid/hid-mf.c1
-rw-r--r--drivers/hid/hid-microsoft.c3
-rw-r--r--drivers/hid/hid-monterey.c3
-rw-r--r--drivers/hid/hid-multitouch.c277
-rw-r--r--drivers/hid/hid-nintendo.c96
-rw-r--r--drivers/hid/hid-nti.c2
-rw-r--r--drivers/hid/hid-ntrig.c11
-rw-r--r--drivers/hid/hid-nvidia-shield.c6
-rw-r--r--drivers/hid/hid-ortek.c3
-rw-r--r--drivers/hid/hid-petalynx.c3
-rw-r--r--drivers/hid/hid-picolcd_backlight.c12
-rw-r--r--drivers/hid/hid-picolcd_core.c14
-rw-r--r--drivers/hid/hid-picolcd_fb.c12
-rw-r--r--drivers/hid/hid-picolcd_lcd.c8
-rw-r--r--drivers/hid/hid-pl.c1
-rw-r--r--drivers/hid/hid-plantronics.c121
-rw-r--r--drivers/hid/hid-playstation.c1057
-rw-r--r--drivers/hid/hid-primax.c1
-rw-r--r--drivers/hid/hid-prodikeys.c7
-rw-r--r--drivers/hid/hid-pxrc.c6
-rw-r--r--drivers/hid/hid-quirks.c68
-rw-r--r--drivers/hid/hid-razer.c1
-rw-r--r--drivers/hid/hid-redragon.c3
-rw-r--r--drivers/hid/hid-retrode.c1
-rw-r--r--drivers/hid/hid-roccat-arvo.c18
-rw-r--r--drivers/hid/hid-roccat-common.h14
-rw-r--r--drivers/hid/hid-roccat-isku.c12
-rw-r--r--drivers/hid/hid-roccat-kone.c16
-rw-r--r--drivers/hid/hid-roccat-koneplus.c32
-rw-r--r--drivers/hid/hid-roccat-konepure.c2
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c26
-rw-r--r--drivers/hid/hid-roccat-lua.c6
-rw-r--r--drivers/hid/hid-roccat-pyra.c36
-rw-r--r--drivers/hid/hid-roccat-ryos.c2
-rw-r--r--drivers/hid/hid-roccat-savu.c2
-rw-r--r--drivers/hid/hid-saitek.c3
-rw-r--r--drivers/hid/hid-samsung.c3
-rw-r--r--drivers/hid/hid-semitek.c5
-rw-r--r--drivers/hid/hid-sensor-custom.c6
-rw-r--r--drivers/hid/hid-sensor-hub.c23
-rw-r--r--drivers/hid/hid-sigmamicro.c4
-rw-r--r--drivers/hid/hid-sjoy.c1
-rw-r--r--drivers/hid/hid-sony.c24
-rw-r--r--drivers/hid/hid-speedlink.c1
-rw-r--r--drivers/hid/hid-steam.c100
-rw-r--r--drivers/hid/hid-steelseries.c250
-rw-r--r--drivers/hid/hid-sunplus.c3
-rw-r--r--drivers/hid/hid-thrustmaster.c9
-rw-r--r--drivers/hid/hid-tivo.c1
-rw-r--r--drivers/hid/hid-tmff.c1
-rw-r--r--drivers/hid/hid-topre.c11
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/hid-twinhan.c1
-rw-r--r--drivers/hid/hid-uclogic-core.c104
-rw-r--r--drivers/hid/hid-uclogic-params.c181
-rw-r--r--drivers/hid/hid-uclogic-params.h20
-rw-r--r--drivers/hid/hid-uclogic-rdesc-test.c2
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c202
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h32
-rw-r--r--drivers/hid/hid-universal-pidff.c204
-rw-r--r--drivers/hid/hid-viewsonic.c9
-rw-r--r--drivers/hid/hid-vivaldi-common.c1
-rw-r--r--drivers/hid/hid-vrc2.c6
-rw-r--r--drivers/hid/hid-waltop.c31
-rw-r--r--drivers/hid/hid-wiimote-core.c4
-rw-r--r--drivers/hid/hid-winwing.c178
-rw-r--r--drivers/hid/hid-xiaomi.c8
-rw-r--r--drivers/hid/hid-xinmo.c1
-rw-r--r--drivers/hid/hid-zpff.c1
-rw-r--r--drivers/hid/hid-zydacron.c3
-rw-r--r--drivers/hid/hidraw.c257
-rw-r--r--drivers/hid/i2c-hid/Kconfig2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c160
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c76
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c131
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c104
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c45
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h11
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c25
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c21
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c27
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h3
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c30
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h29
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.c235
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.h65
-rw-r--r--drivers/hid/intel-thc-hid/Kconfig42
-rw-r--r--drivers/hid/intel-thc-hid/Makefile23
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c1044
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h227
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c165
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c248
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h20
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c1006
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h176
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c164
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c413
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h25
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c1719
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h133
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c1009
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h154
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h886
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c94
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h26
-rw-r--r--drivers/hid/surface-hid/Kconfig2
-rw-r--r--drivers/hid/surface-hid/surface_hid.c2
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c2
-rw-r--r--drivers/hid/surface-hid/surface_kbd.c4
-rw-r--r--drivers/hid/uhid.c1
-rw-r--r--drivers/hid/usbhid/Kconfig3
-rw-r--r--drivers/hid/usbhid/hid-core.c38
-rw-r--r--drivers/hid/usbhid/hid-pidff.c1059
-rw-r--r--drivers/hid/usbhid/hid-pidff.h32
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/wacom.h10
-rw-r--r--drivers/hid/wacom_sys.c99
-rw-r--r--drivers/hid/wacom_wac.c187
-rw-r--r--drivers/hid/wacom_wac.h15
-rw-r--r--drivers/hsi/clients/ssi_protocol.c26
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c13
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c13
-rw-r--r--drivers/hsi/hsi_core.c2
-rw-r--r--drivers/hte/hte-tegra194-test.c5
-rw-r--r--drivers/hv/Kconfig66
-rw-r--r--drivers/hv/Makefile15
-rw-r--r--drivers/hv/channel.c143
-rw-r--r--drivers/hv/channel_mgmt.c104
-rw-r--r--drivers/hv/connection.c38
-rw-r--r--drivers/hv/hv.c510
-rw-r--r--drivers/hv/hv_balloon.c216
-rw-r--r--drivers/hv/hv_common.c326
-rw-r--r--drivers/hv/hv_fcopy.c427
-rw-r--r--drivers/hv/hv_kvp.c12
-rw-r--r--drivers/hv/hv_proc.c196
-rw-r--r--drivers/hv/hv_snapshot.c11
-rw-r--r--drivers/hv/hv_util.c27
-rw-r--r--drivers/hv/hv_utils_transport.c10
-rw-r--r--drivers/hv/hyperv_vmbus.h111
-rw-r--r--drivers/hv/mshv.h28
-rw-r--r--drivers/hv/mshv_common.c239
-rw-r--r--drivers/hv/mshv_eventfd.c839
-rw-r--r--drivers/hv/mshv_eventfd.h71
-rw-r--r--drivers/hv/mshv_irq.c128
-rw-r--r--drivers/hv/mshv_portid_table.c83
-rw-r--r--drivers/hv/mshv_regions.c555
-rw-r--r--drivers/hv/mshv_root.h336
-rw-r--r--drivers/hv/mshv_root_hv_call.c1024
-rw-r--r--drivers/hv/mshv_root_main.c2342
-rw-r--r--drivers/hv/mshv_synic.c665
-rw-r--r--drivers/hv/mshv_vtl.h25
-rw-r--r--drivers/hv/mshv_vtl_main.c1392
-rw-r--r--drivers/hv/ring_buffer.c6
-rw-r--r--drivers/hv/vmbus_drv.c508
-rw-r--r--drivers/hwmon/Kconfig322
-rw-r--r--drivers/hwmon/Makefile21
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/abituguru3.c4
-rw-r--r--drivers/hwmon/acpi_power_meter.c854
-rw-r--r--drivers/hwmon/ad7314.c10
-rw-r--r--drivers/hwmon/ad7418.c7
-rw-r--r--drivers/hwmon/adc128d818.c4
-rw-r--r--drivers/hwmon/adm1021.c505
-rw-r--r--drivers/hwmon/adm1026.c16
-rw-r--r--drivers/hwmon/adm1029.c3
-rw-r--r--drivers/hwmon/adm1031.c4
-rw-r--r--drivers/hwmon/adm9240.c17
-rw-r--r--drivers/hwmon/ads7828.c7
-rw-r--r--drivers/hwmon/adt7310.c2
-rw-r--r--drivers/hwmon/adt7410.c11
-rw-r--r--drivers/hwmon/adt7411.c59
-rw-r--r--drivers/hwmon/adt7470.c22
-rw-r--r--drivers/hwmon/adt7475.c185
-rw-r--r--drivers/hwmon/adt7x10.c45
-rw-r--r--drivers/hwmon/aht10.c62
-rw-r--r--drivers/hwmon/amc6821.c1495
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c41
-rw-r--r--drivers/hwmon/aspeed-g6-pwm-tach.c9
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c8
-rw-r--r--drivers/hwmon/asus-ec-sensors.c483
-rw-r--r--drivers/hwmon/asus_atk0110.c17
-rw-r--r--drivers/hwmon/asus_rog_ryujin.c50
-rw-r--r--drivers/hwmon/axi-fan-control.c2
-rw-r--r--drivers/hwmon/cgbc-hwmon.c307
-rw-r--r--drivers/hwmon/chipcap2.c99
-rw-r--r--drivers/hwmon/coretemp.c76
-rw-r--r--drivers/hwmon/corsair-cpro.c102
-rw-r--r--drivers/hwmon/corsair-psu.c21
-rw-r--r--drivers/hwmon/cros_ec_hwmon.c597
-rw-r--r--drivers/hwmon/da9052-hwmon.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c151
-rw-r--r--drivers/hwmon/dme1737.c6
-rw-r--r--drivers/hwmon/drivetemp.c13
-rw-r--r--drivers/hwmon/ds1621.c4
-rw-r--r--drivers/hwmon/emc1403.c46
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc2305.c217
-rw-r--r--drivers/hwmon/f71805f.c2
-rw-r--r--drivers/hwmon/f71882fg.c2
-rw-r--r--drivers/hwmon/f75375s.c46
-rw-r--r--drivers/hwmon/fam15h_power.c6
-rw-r--r--drivers/hwmon/fschmd.c3
-rw-r--r--drivers/hwmon/ftsteutates.c93
-rw-r--r--drivers/hwmon/g762.c43
-rw-r--r--drivers/hwmon/gigabyte_waterforce.c2
-rw-r--r--drivers/hwmon/gpd-fan.c683
-rw-r--r--drivers/hwmon/gpio-fan.c119
-rw-r--r--drivers/hwmon/gsc-hwmon.c24
-rw-r--r--drivers/hwmon/hp-wmi-sensors.c24
-rw-r--r--drivers/hwmon/hs3001.c10
-rw-r--r--drivers/hwmon/htu31.c350
-rw-r--r--drivers/hwmon/hwmon-vid.c4
-rw-r--r--drivers/hwmon/hwmon.c257
-rw-r--r--drivers/hwmon/i5500_temp.c11
-rw-r--r--drivers/hwmon/i5k_amb.c17
-rw-r--r--drivers/hwmon/ibmaem.c27
-rw-r--r--drivers/hwmon/ibmpex.c14
-rw-r--r--drivers/hwmon/iio_hwmon.c56
-rw-r--r--drivers/hwmon/ina238.c638
-rw-r--r--drivers/hwmon/ina2xx.c1000
-rw-r--r--drivers/hwmon/ina3221.c35
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c15
-rw-r--r--drivers/hwmon/isl28022.c494
-rw-r--r--drivers/hwmon/jc42.c66
-rw-r--r--drivers/hwmon/k10temp.c125
-rw-r--r--drivers/hwmon/kbatt.c147
-rw-r--r--drivers/hwmon/kfan.c246
-rw-r--r--drivers/hwmon/lenovo-ec-sensors.c34
-rw-r--r--drivers/hwmon/lm63.c5
-rw-r--r--drivers/hwmon/lm70.c6
-rw-r--r--drivers/hwmon/lm75.c360
-rw-r--r--drivers/hwmon/lm78.c9
-rw-r--r--drivers/hwmon/lm83.c16
-rw-r--r--drivers/hwmon/lm85.c7
-rw-r--r--drivers/hwmon/lm87.c16
-rw-r--r--drivers/hwmon/lm90.c177
-rw-r--r--drivers/hwmon/lm92.c456
-rw-r--r--drivers/hwmon/lm95234.c809
-rw-r--r--drivers/hwmon/lm95241.c16
-rw-r--r--drivers/hwmon/lm95245.c126
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c18
-rw-r--r--drivers/hwmon/ltc2947-core.c112
-rw-r--r--drivers/hwmon/ltc2991.c23
-rw-r--r--drivers/hwmon/ltc2992.c41
-rw-r--r--drivers/hwmon/ltc4245.c8
-rw-r--r--drivers/hwmon/ltc4282.c136
-rw-r--r--drivers/hwmon/macsmc-hwmon.c851
-rw-r--r--drivers/hwmon/max127.c23
-rw-r--r--drivers/hwmon/max16065.c34
-rw-r--r--drivers/hwmon/max1619.c499
-rw-r--r--drivers/hwmon/max1668.c485
-rw-r--r--drivers/hwmon/max197.c2
-rw-r--r--drivers/hwmon/max31790.c48
-rw-r--r--drivers/hwmon/max31827.c85
-rw-r--r--drivers/hwmon/max6620.c43
-rw-r--r--drivers/hwmon/max6639.c594
-rw-r--r--drivers/hwmon/max6642.c314
-rw-r--r--drivers/hwmon/max6697.c842
-rw-r--r--drivers/hwmon/max77705-hwmon.c221
-rw-r--r--drivers/hwmon/mc13783-adc.c2
-rw-r--r--drivers/hwmon/mc33xs2410_hwmon.c178
-rw-r--r--drivers/hwmon/mcp3021.c6
-rw-r--r--drivers/hwmon/mlxreg-fan.c47
-rw-r--r--drivers/hwmon/mr75203.c2
-rw-r--r--drivers/hwmon/nct6683.c11
-rw-r--r--drivers/hwmon/nct6694-hwmon.c949
-rw-r--r--drivers/hwmon/nct6775-core.c19
-rw-r--r--drivers/hwmon/nct6775-i2c.c2
-rw-r--r--drivers/hwmon/nct6775-platform.c9
-rw-r--r--drivers/hwmon/nct6775.h2
-rw-r--r--drivers/hwmon/nct7363.c445
-rw-r--r--drivers/hwmon/nct7802.c69
-rw-r--r--drivers/hwmon/nct7904.c63
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c18
-rw-r--r--drivers/hwmon/ntc_thermistor.c125
-rw-r--r--drivers/hwmon/nzxt-kraken2.c11
-rw-r--r--drivers/hwmon/nzxt-kraken3.c2
-rw-r--r--drivers/hwmon/nzxt-smart2.c11
-rw-r--r--drivers/hwmon/occ/common.c242
-rw-r--r--drivers/hwmon/occ/p8_i2c.c2
-rw-r--r--drivers/hwmon/occ/p9_sbe.c8
-rw-r--r--drivers/hwmon/oxp-sensors.c503
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pc87427.c2
-rw-r--r--drivers/hwmon/peci/common.h3
-rw-r--r--drivers/hwmon/peci/cputemp.c100
-rw-r--r--drivers/hwmon/peci/dimmtemp.c48
-rw-r--r--drivers/hwmon/pmbus/Kconfig157
-rw-r--r--drivers/hwmon/pmbus/Makefile13
-rw-r--r--drivers/hwmon/pmbus/acbel-fsg032.c2
-rw-r--r--drivers/hwmon/pmbus/adm1266.c2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c19
-rw-r--r--drivers/hwmon/pmbus/adp1050.c74
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c2
-rw-r--r--drivers/hwmon/pmbus/bpa-rs600.c2
-rw-r--r--drivers/hwmon/pmbus/crps.c74
-rw-r--r--drivers/hwmon/pmbus/delta-ahe50dc-fan.c2
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c9
-rw-r--r--drivers/hwmon/pmbus/fsp-3y.c2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c2
-rw-r--r--drivers/hwmon/pmbus/ina233.c191
-rw-r--r--drivers/hwmon/pmbus/inspur-ipsps.c2
-rw-r--r--drivers/hwmon/pmbus/ir35221.c2
-rw-r--r--drivers/hwmon/pmbus/ir36021.c2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c2
-rw-r--r--drivers/hwmon/pmbus/irps5401.c2
-rw-r--r--drivers/hwmon/pmbus/isl68137.c234
-rw-r--r--drivers/hwmon/pmbus/lm25066.c6
-rw-r--r--drivers/hwmon/pmbus/lt3074.c122
-rw-r--r--drivers/hwmon/pmbus/lt7182s.c2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c87
-rw-r--r--drivers/hwmon/pmbus/ltc3815.c2
-rw-r--r--drivers/hwmon/pmbus/ltc4286.c4
-rw-r--r--drivers/hwmon/pmbus/max15301.c95
-rw-r--r--drivers/hwmon/pmbus/max16064.c2
-rw-r--r--drivers/hwmon/pmbus/max16601.c2
-rw-r--r--drivers/hwmon/pmbus/max17616.c73
-rw-r--r--drivers/hwmon/pmbus/max20730.c2
-rw-r--r--drivers/hwmon/pmbus/max20751.c2
-rw-r--r--drivers/hwmon/pmbus/max31785.c2
-rw-r--r--drivers/hwmon/pmbus/max34440.c159
-rw-r--r--drivers/hwmon/pmbus/max8688.c2
-rw-r--r--drivers/hwmon/pmbus/mp2856.c10
-rw-r--r--drivers/hwmon/pmbus/mp2869.c659
-rw-r--r--drivers/hwmon/pmbus/mp2888.c2
-rw-r--r--drivers/hwmon/pmbus/mp2891.c600
-rw-r--r--drivers/hwmon/pmbus/mp2925.c316
-rw-r--r--drivers/hwmon/pmbus/mp29502.c670
-rw-r--r--drivers/hwmon/pmbus/mp2975.c2
-rw-r--r--drivers/hwmon/pmbus/mp2993.c261
-rw-r--r--drivers/hwmon/pmbus/mp5023.c2
-rw-r--r--drivers/hwmon/pmbus/mp5920.c90
-rw-r--r--drivers/hwmon/pmbus/mp5990.c69
-rw-r--r--drivers/hwmon/pmbus/mp9941.c319
-rw-r--r--drivers/hwmon/pmbus/mp9945.c243
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c8
-rw-r--r--drivers/hwmon/pmbus/mpq8785.c95
-rw-r--r--drivers/hwmon/pmbus/pim4328.c2
-rw-r--r--drivers/hwmon/pmbus/pli1209bc.c28
-rw-r--r--drivers/hwmon/pmbus/pm6764tr.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.c4
-rw-r--r--drivers/hwmon/pmbus/pmbus.h39
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c597
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c2
-rw-r--r--drivers/hwmon/pmbus/q54sj108a2.c2
-rw-r--r--drivers/hwmon/pmbus/stpddc60.c2
-rw-r--r--drivers/hwmon/pmbus/tda38640.c4
-rw-r--r--drivers/hwmon/pmbus/tps25990.c436
-rw-r--r--drivers/hwmon/pmbus/tps40422.c2
-rw-r--r--drivers/hwmon/pmbus/tps53679.c39
-rw-r--r--drivers/hwmon/pmbus/tps546d24.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c88
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c2
-rw-r--r--drivers/hwmon/pmbus/xdp710.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe152c4.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c68
-rw-r--r--drivers/hwmon/powerz.c8
-rw-r--r--drivers/hwmon/powr1220.c23
-rw-r--r--drivers/hwmon/pt5161l.c50
-rw-r--r--drivers/hwmon/pwm-fan.c84
-rw-r--r--drivers/hwmon/qnap-mcu-hwmon.c363
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c30
-rw-r--r--drivers/hwmon/sa67mcu-hwmon.c161
-rw-r--r--drivers/hwmon/sbrmi.c357
-rw-r--r--drivers/hwmon/sbtsi_temp.c63
-rw-r--r--drivers/hwmon/sch5636.c5
-rw-r--r--drivers/hwmon/sch56xx-common.c4
-rw-r--r--drivers/hwmon/sch56xx-common.h1
-rw-r--r--drivers/hwmon/scmi-hwmon.c9
-rw-r--r--drivers/hwmon/sfctemp.c36
-rw-r--r--drivers/hwmon/sg2042-mcu.c360
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/sht21.c20
-rw-r--r--drivers/hwmon/sht3x.c114
-rw-r--r--drivers/hwmon/sht4x.c198
-rw-r--r--drivers/hwmon/shtc1.c6
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/sl28cpld-hwmon.c9
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/spd5118.c778
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/surface_fan.c10
-rw-r--r--drivers/hwmon/surface_temp.c235
-rw-r--r--drivers/hwmon/sy7636a-hwmon.c8
-rw-r--r--drivers/hwmon/thmc50.c4
-rw-r--r--drivers/hwmon/tmp102.c29
-rw-r--r--drivers/hwmon/tmp103.c3
-rw-r--r--drivers/hwmon/tmp108.c82
-rw-r--r--drivers/hwmon/tmp401.c27
-rw-r--r--drivers/hwmon/tmp421.c41
-rw-r--r--drivers/hwmon/tmp464.c48
-rw-r--r--drivers/hwmon/tmp513.c19
-rw-r--r--drivers/hwmon/tps23861.c35
-rw-r--r--drivers/hwmon/tsc1641.c748
-rw-r--r--drivers/hwmon/ultra45_env.c2
-rw-r--r--drivers/hwmon/vexpress-hwmon.c2
-rw-r--r--drivers/hwmon/via-cputemp.c2
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt1211.c55
-rw-r--r--drivers/hwmon/vt8231.c22
-rw-r--r--drivers/hwmon/w83627ehf.c13
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c11
-rw-r--r--drivers/hwmon/w83791d.c17
-rw-r--r--drivers/hwmon/w83793.c1
-rw-r--r--drivers/hwmon/w83795.c4
-rw-r--r--drivers/hwmon/w83l786ng.c26
-rw-r--r--drivers/hwmon/xgene-hwmon.c43
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c122
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h3
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c25
-rw-r--r--drivers/hwspinlock/u8500_hsem.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig33
-rw-r--r--drivers/hwtracing/coresight/Makefile8
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c181
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-cfg-preload.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-cfg-preload.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-cfg-pstop.c83
-rw-r--r--drivers/hwtracing/coresight/coresight-config.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-config.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c564
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c117
-rw-r--r--drivers/hwtracing/coresight/coresight-ctcu-core.c313
-rw-r--r--drivers/hwtracing/coresight/coresight-ctcu.h39
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c51
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c76
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h10
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c105
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c57
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c147
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h20
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c160
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c527
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c269
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h49
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c147
-rw-r--r--drivers/hwtracing/coresight/coresight-kunit-tests.c74
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c61
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h53
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c143
-rw-r--r--drivers/hwtracing/coresight/coresight-self-hosted-trace.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c156
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg-configfs.c14
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c77
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c499
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c168
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c279
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h118
-rw-r--r--drivers/hwtracing/coresight/coresight-tnoc.c246
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c47
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c310
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h33
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c134
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c179
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h79
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c60
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c24
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h3
-rw-r--r--drivers/hwtracing/intel_th/Kconfig1
-rw-r--r--drivers/hwtracing/intel_th/acpi.c4
-rw-r--r--drivers/hwtracing/intel_th/core.c37
-rw-r--r--drivers/hwtracing/intel_th/gth.c8
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h2
-rw-r--r--drivers/hwtracing/intel_th/msu-sink.c1
-rw-r--r--drivers/hwtracing/intel_th/msu.c56
-rw-r--r--drivers/hwtracing/intel_th/pci.c54
-rw-r--r--drivers/hwtracing/intel_th/sth.c2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c1
-rw-r--r--drivers/hwtracing/stm/console.c1
-rw-r--r--drivers/hwtracing/stm/core.c20
-rw-r--r--drivers/hwtracing/stm/ftrace.c1
-rw-r--r--drivers/hwtracing/stm/heartbeat.c7
-rw-r--r--drivers/hwtracing/stm/p_basic.c3
-rw-r--r--drivers/hwtracing/stm/p_sys-t.c93
-rw-r--r--drivers/hwtracing/stm/stm.h2
-rw-r--r--drivers/i2c/Kconfig10
-rw-r--r--drivers/i2c/Makefile7
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c110
-rw-r--r--drivers/i2c/busses/Kconfig186
-rw-r--r--drivers/i2c/busses/Makefile28
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c15
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c1
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c14
-rw-r--r--drivers/i2c/busses/i2c-altera.c6
-rw-r--r--drivers/i2c/busses/i2c-amd-asf-plat.c370
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c10
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c6
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h1
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c245
-rw-r--r--drivers/i2c/busses/i2c-amd756.c8
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c28
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c3
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c8
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c3
-rw-r--r--drivers/i2c/busses/i2c-au1550.c17
-rw-r--r--drivers/i2c/busses/i2c-axxia.c25
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c240
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c21
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c24
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c24
-rw-r--r--drivers/i2c/busses/i2c-cadence.c453
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-ccgx-ucsi.c1
-rw-r--r--drivers/i2c/busses/i2c-cgbc.c406
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c12
-rw-r--r--drivers/i2c/busses/i2c-cp2615.c10
-rw-r--r--drivers/i2c/busses/i2c-cpm.c6
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c9
-rw-r--r--drivers/i2c/busses/i2c-davinci.c136
-rw-r--r--drivers/i2c/busses/i2c-designware-amdisp.c207
-rw-r--r--drivers/i2c/busses/i2c-designware-amdpsp.c36
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c276
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h59
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c140
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c131
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c282
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c24
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c10
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c2
-rw-r--r--drivers/i2c/busses/i2c-dln2.c6
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c30
-rw-r--r--drivers/i2c/busses/i2c-emev2.c33
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c45
-rw-r--r--drivers/i2c/busses/i2c-fsi.c56
-rw-r--r--drivers/i2c/busses/i2c-gpio.c10
-rw-r--r--drivers/i2c/busses/i2c-gxp.c8
-rw-r--r--drivers/i2c/busses/i2c-highlander.c4
-rw-r--r--drivers/i2c/busses/i2c-hisi.c8
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c11
-rw-r--r--drivers/i2c/busses/i2c-i801.c441
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c43
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c11
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c1063
-rw-r--r--drivers/i2c/busses/i2c-imx.c579
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c17
-rw-r--r--drivers/i2c/busses/i2c-isch.c322
-rw-r--r--drivers/i2c/busses/i2c-ismt.c12
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c28
-rw-r--r--drivers/i2c/busses/i2c-k1.c628
-rw-r--r--drivers/i2c/busses/i2c-keba.c594
-rw-r--r--drivers/i2c/busses/i2c-kempld.c16
-rw-r--r--drivers/i2c/busses/i2c-ljca.c22
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c19
-rw-r--r--drivers/i2c/busses/i2c-ls2x.c27
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c6
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c230
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c195
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c16
-rw-r--r--drivers/i2c/busses/i2c-mpc.c29
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c37
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c44
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c27
-rw-r--r--drivers/i2c/busses/i2c-mxs.c4
-rw-r--r--drivers/i2c/busses/i2c-nct6694.c196
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c240
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c16
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c92
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c488
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c10
-rw-r--r--drivers/i2c/busses/i2c-ocores.c12
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c283
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h17
-rw-r--r--drivers/i2c/busses/i2c-octeon-platdrv.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c105
-rw-r--r--drivers/i2c/busses/i2c-opal.c12
-rw-r--r--drivers/i2c/busses/i2c-owl.c12
-rw-r--r--drivers/i2c/busses/i2c-parport.c1
-rw-r--r--drivers/i2c/busses/i2c-pasemi-core.c161
-rw-r--r--drivers/i2c/busses/i2c-pasemi-pci.c10
-rw-r--r--drivers/i2c/busses/i2c-pasemi-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c4
-rw-r--r--drivers/i2c/busses/i2c-piix4.c82
-rw-r--r--drivers/i2c/busses/i2c-piix4.h44
-rw-r--r--drivers/i2c/busses/i2c-pnx.c58
-rw-r--r--drivers/i2c/busses/i2c-powermac.c18
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c4
-rw-r--r--drivers/i2c/busses/i2c-pxa.c26
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c75
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c371
-rw-r--r--drivers/i2c/busses/i2c-qup.c60
-rw-r--r--drivers/i2c/busses/i2c-rcar.c82
-rw-r--r--drivers/i2c/busses/i2c-riic.c451
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c22
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c10
-rw-r--r--drivers/i2c/busses/i2c-rtl9300.c539
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c32
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c11
-rw-r--r--drivers/i2c/busses/i2c-scmi.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c6
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c9
-rw-r--r--drivers/i2c/busses/i2c-simtec.c2
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c12
-rw-r--r--drivers/i2c/busses/i2c-sprd.c22
-rw-r--r--drivers/i2c/busses/i2c-st.c31
-rw-r--r--drivers/i2c/busses/i2c-stm32.c15
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c10
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c76
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c22
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c18
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c130
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c7
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c15
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c52
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c41
-rw-r--r--drivers/i2c/busses/i2c-usbio.c321
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-via.c15
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.c71
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.h2
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c66
-rw-r--r--drivers/i2c/busses/i2c-viai2c-zhaoxin.c125
-rw-r--r--drivers/i2c/busses/i2c-viapro.c33
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c31
-rw-r--r--drivers/i2c/busses/i2c-virtio.c28
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c59
-rw-r--r--drivers/i2c/busses/i2c-xiic.c359
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c4
-rw-r--r--drivers/i2c/busses/scx200_acb.c8
-rw-r--r--drivers/i2c/i2c-atr.c588
-rw-r--r--drivers/i2c/i2c-core-acpi.c25
-rw-r--r--drivers/i2c/i2c-core-base.c180
-rw-r--r--drivers/i2c/i2c-core-of-prober.c415
-rw-r--r--drivers/i2c/i2c-core-of.c2
-rw-r--r--drivers/i2c/i2c-core-slave.c16
-rw-r--r--drivers/i2c/i2c-core-smbus.c14
-rw-r--r--drivers/i2c/i2c-core.h9
-rw-r--r--drivers/i2c/i2c-dev.c30
-rw-r--r--drivers/i2c/i2c-mux.c15
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/i2c/i2c-slave-testunit.c192
-rw-r--r--drivers/i2c/i2c-smbus.c130
-rw-r--r--drivers/i2c/muxes/Kconfig16
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c12
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c16
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mule.c147
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c14
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c7
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c4
-rw-r--r--drivers/i3c/device.c34
-rw-r--r--drivers/i3c/internals.h54
-rw-r--r--drivers/i3c/master.c295
-rw-r--r--drivers/i3c/master/Kconfig36
-rw-r--r--drivers/i3c/master/Makefile2
-rw-r--r--drivers/i3c/master/adi-i3c-master.c1019
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c3
-rw-r--r--drivers/i3c/master/dw-i3c-master.c546
-rw-r--r--drivers/i3c/master/dw-i3c-master.h17
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c99
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c21
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c7
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c169
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c163
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h18
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci_quirks.c44
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c296
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c77
-rw-r--r--drivers/i3c/master/renesas-i3c.c1404
-rw-r--r--drivers/i3c/master/svc-i3c-master.c772
-rw-r--r--drivers/idle/Makefile5
-rw-r--r--drivers/idle/intel_idle.c704
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig50
-rw-r--r--drivers/iio/accel/Makefile7
-rw-r--r--drivers/iio/accel/adis16201.c6
-rw-r--r--drivers/iio/accel/adis16209.c2
-rw-r--r--drivers/iio/accel/adxl313.h33
-rw-r--r--drivers/iio/accel/adxl313_core.c939
-rw-r--r--drivers/iio/accel/adxl313_i2c.c8
-rw-r--r--drivers/iio/accel/adxl313_spi.c16
-rw-r--r--drivers/iio/accel/adxl345.h95
-rw-r--r--drivers/iio/accel/adxl345_core.c1987
-rw-r--r--drivers/iio/accel/adxl345_i2c.c6
-rw-r--r--drivers/iio/accel/adxl345_spi.c17
-rw-r--r--drivers/iio/accel/adxl355_core.c62
-rw-r--r--drivers/iio/accel/adxl355_i2c.c2
-rw-r--r--drivers/iio/accel/adxl355_spi.c12
-rw-r--r--drivers/iio/accel/adxl367.c210
-rw-r--r--drivers/iio/accel/adxl367_i2c.c8
-rw-r--r--drivers/iio/accel/adxl367_spi.c8
-rw-r--r--drivers/iio/accel/adxl372.c18
-rw-r--r--drivers/iio/accel/adxl372_i2c.c6
-rw-r--r--drivers/iio/accel/adxl372_spi.c4
-rw-r--r--drivers/iio/accel/adxl380.c1955
-rw-r--r--drivers/iio/accel/adxl380.h30
-rw-r--r--drivers/iio/accel/adxl380_i2c.c68
-rw-r--r--drivers/iio/accel/adxl380_spi.c70
-rw-r--r--drivers/iio/accel/bma180.c27
-rw-r--r--drivers/iio/accel/bma220.h28
-rw-r--r--drivers/iio/accel/bma220_core.c585
-rw-r--r--drivers/iio/accel/bma220_i2c.c69
-rw-r--r--drivers/iio/accel/bma220_spi.c325
-rw-r--r--drivers/iio/accel/bma400.h155
-rw-r--r--drivers/iio/accel/bma400_core.c380
-rw-r--r--drivers/iio/accel/bma400_i2c.c4
-rw-r--r--drivers/iio/accel/bma400_spi.c4
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c84
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c8
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c6
-rw-r--r--drivers/iio/accel/bmc150-accel.h4
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c25
-rw-r--r--drivers/iio/accel/bmi088-accel-i2c.c6
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c8
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c2
-rw-r--r--drivers/iio/accel/da280.c4
-rw-r--r--drivers/iio/accel/da311.c4
-rw-r--r--drivers/iio/accel/dmard06.c10
-rw-r--r--drivers/iio/accel/dmard09.c10
-rw-r--r--drivers/iio/accel/dmard10.c8
-rw-r--r--drivers/iio/accel/fxls8962af-core.c109
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c8
-rw-r--r--drivers/iio/accel/fxls8962af-spi.c6
-rw-r--r--drivers/iio/accel/fxls8962af.h2
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c15
-rw-r--r--drivers/iio/accel/kionix-kx022a-i2c.c6
-rw-r--r--drivers/iio/accel/kionix-kx022a-spi.c6
-rw-r--r--drivers/iio/accel/kionix-kx022a.c272
-rw-r--r--drivers/iio/accel/kionix-kx022a.h14
-rw-r--r--drivers/iio/accel/kxcjk-1013.c608
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c8
-rw-r--r--drivers/iio/accel/kxsd9-spi.c4
-rw-r--r--drivers/iio/accel/kxsd9.c22
-rw-r--r--drivers/iio/accel/mc3230.c99
-rw-r--r--drivers/iio/accel/mma7455_core.c14
-rw-r--r--drivers/iio/accel/mma7455_i2c.c6
-rw-r--r--drivers/iio/accel/mma7455_spi.c2
-rw-r--r--drivers/iio/accel/mma7660.c60
-rw-r--r--drivers/iio/accel/mma8452.c109
-rw-r--r--drivers/iio/accel/mma9551.c41
-rw-r--r--drivers/iio/accel/mma9551_core.c41
-rw-r--r--drivers/iio/accel/mma9553.c67
-rw-r--r--drivers/iio/accel/msa311.c72
-rw-r--r--drivers/iio/accel/mxc4005.c45
-rw-r--r--drivers/iio/accel/mxc6255.c7
-rw-r--r--drivers/iio/accel/sca3000.c37
-rw-r--r--drivers/iio/accel/sca3300.c32
-rw-r--r--drivers/iio/accel/ssp_accel_sensor.c2
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c97
-rw-r--r--drivers/iio/accel/st_accel_i2c.c13
-rw-r--r--drivers/iio/accel/st_accel_spi.c11
-rw-r--r--drivers/iio/accel/stk8312.c23
-rw-r--r--drivers/iio/accel/stk8ba50.c23
-rw-r--r--drivers/iio/adc/88pm886-gpadc.c393
-rw-r--r--drivers/iio/adc/Kconfig493
-rw-r--r--drivers/iio/adc/Makefile54
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c11
-rw-r--r--drivers/iio/adc/ad4000.c1264
-rw-r--r--drivers/iio/adc/ad4030.c1228
-rw-r--r--drivers/iio/adc/ad4080.c701
-rw-r--r--drivers/iio/adc/ad4130.c163
-rw-r--r--drivers/iio/adc/ad4170-4.c3027
-rw-r--r--drivers/iio/adc/ad4695.c2026
-rw-r--r--drivers/iio/adc/ad4851.c1317
-rw-r--r--drivers/iio/adc/ad7091r-base.c21
-rw-r--r--drivers/iio/adc/ad7091r-base.h2
-rw-r--r--drivers/iio/adc/ad7091r5.c10
-rw-r--r--drivers/iio/adc/ad7091r8.c10
-rw-r--r--drivers/iio/adc/ad7124.c1546
-rw-r--r--drivers/iio/adc/ad7173.c2099
-rw-r--r--drivers/iio/adc/ad7191.c554
-rw-r--r--drivers/iio/adc/ad7192.c693
-rw-r--r--drivers/iio/adc/ad7266.c61
-rw-r--r--drivers/iio/adc/ad7280a.c32
-rw-r--r--drivers/iio/adc/ad7291.c8
-rw-r--r--drivers/iio/adc/ad7292.c53
-rw-r--r--drivers/iio/adc/ad7298.c18
-rw-r--r--drivers/iio/adc/ad7380.c2129
-rw-r--r--drivers/iio/adc/ad7405.c253
-rw-r--r--drivers/iio/adc/ad7476.c479
-rw-r--r--drivers/iio/adc/ad7606.c1491
-rw-r--r--drivers/iio/adc/ad7606.h251
-rw-r--r--drivers/iio/adc/ad7606_bus_iface.h16
-rw-r--r--drivers/iio/adc/ad7606_par.c196
-rw-r--r--drivers/iio/adc/ad7606_spi.c529
-rw-r--r--drivers/iio/adc/ad7625.c687
-rw-r--r--drivers/iio/adc/ad7766.c14
-rw-r--r--drivers/iio/adc/ad7768-1.c1222
-rw-r--r--drivers/iio/adc/ad7779.c1050
-rw-r--r--drivers/iio/adc/ad7780.c14
-rw-r--r--drivers/iio/adc/ad7791.c40
-rw-r--r--drivers/iio/adc/ad7793.c131
-rw-r--r--drivers/iio/adc/ad7887.c15
-rw-r--r--drivers/iio/adc/ad7923.c33
-rw-r--r--drivers/iio/adc/ad7944.c889
-rw-r--r--drivers/iio/adc/ad7949.c11
-rw-r--r--drivers/iio/adc/ad799x.c58
-rw-r--r--drivers/iio/adc/ad9467.c844
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c541
-rw-r--r--drivers/iio/adc/ade9000.c1799
-rw-r--r--drivers/iio/adc/adi-axi-adc.c638
-rw-r--r--drivers/iio/adc/aspeed_adc.c69
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c150
-rw-r--r--drivers/iio/adc/at91_adc.c33
-rw-r--r--drivers/iio/adc/axp20x_adc.c511
-rw-r--r--drivers/iio/adc/axp288_adc.c12
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c22
-rw-r--r--drivers/iio/adc/berlin2-adc.c26
-rw-r--r--drivers/iio/adc/cc10001_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c54
-rw-r--r--drivers/iio/adc/da9150-gpadc.c33
-rw-r--r--drivers/iio/adc/dln2-adc.c53
-rw-r--r--drivers/iio/adc/envelope-detector.c4
-rw-r--r--drivers/iio/adc/ep93xx_adc.c4
-rw-r--r--drivers/iio/adc/exynos_adc.c298
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c168
-rw-r--r--drivers/iio/adc/gehc-pmc-adc.c228
-rw-r--r--drivers/iio/adc/hi8435.c10
-rw-r--r--drivers/iio/adc/hx711.c99
-rw-r--r--drivers/iio/adc/imx7d_adc.c6
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c10
-rw-r--r--drivers/iio/adc/imx93_adc.c30
-rw-r--r--drivers/iio/adc/ina2xx-adc.c24
-rw-r--r--drivers/iio/adc/industrialio-adc.c82
-rw-r--r--drivers/iio/adc/ingenic-adc.c3
-rw-r--r--drivers/iio/adc/intel_dc_ti_adc.c328
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c24
-rw-r--r--drivers/iio/adc/lp8788_adc.c18
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c2
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c2
-rw-r--r--drivers/iio/adc/ltc2309.c45
-rw-r--r--drivers/iio/adc/ltc2471.c2
-rw-r--r--drivers/iio/adc/ltc2485.c2
-rw-r--r--drivers/iio/adc/ltc2496.c2
-rw-r--r--drivers/iio/adc/ltc2497-core.c19
-rw-r--r--drivers/iio/adc/ltc2497.c4
-rw-r--r--drivers/iio/adc/ltc2497.h2
-rw-r--r--drivers/iio/adc/max1027.c53
-rw-r--r--drivers/iio/adc/max11100.c6
-rw-r--r--drivers/iio/adc/max1118.c13
-rw-r--r--drivers/iio/adc/max11205.c7
-rw-r--r--drivers/iio/adc/max11410.c107
-rw-r--r--drivers/iio/adc/max1241.c4
-rw-r--r--drivers/iio/adc/max1363.c309
-rw-r--r--drivers/iio/adc/max14001.c391
-rw-r--r--drivers/iio/adc/max34408.c7
-rw-r--r--drivers/iio/adc/max77541-adc.c2
-rw-r--r--drivers/iio/adc/max9611.c10
-rw-r--r--drivers/iio/adc/mcp320x.c10
-rw-r--r--drivers/iio/adc/mcp3422.c2
-rw-r--r--drivers/iio/adc/mcp3564.c80
-rw-r--r--drivers/iio/adc/mcp3911.c127
-rw-r--r--drivers/iio/adc/men_z188_adc.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c194
-rw-r--r--drivers/iio/adc/mp2629_adc.c29
-rw-r--r--drivers/iio/adc/mt6359-auxadc.c908
-rw-r--r--drivers/iio/adc/mt6360-adc.c15
-rw-r--r--drivers/iio/adc/mt6370-adc.c2
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c3
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c28
-rw-r--r--drivers/iio/adc/nau7802.c4
-rw-r--r--drivers/iio/adc/nct7201.c501
-rw-r--r--drivers/iio/adc/npcm_adc.c4
-rw-r--r--drivers/iio/adc/pac1921.c1345
-rw-r--r--drivers/iio/adc/pac1934.c136
-rw-r--r--drivers/iio/adc/palmas_gpadc.c10
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c12
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c11
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c4
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c60
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c7
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c2
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c42
-rw-r--r--drivers/iio/adc/rn5t618-adc.c13
-rw-r--r--drivers/iio/adc/rockchip_saradc.c76
-rw-r--r--drivers/iio/adc/rohm-bd79112.c551
-rw-r--r--drivers/iio/adc/rohm-bd79124.c1125
-rw-r--r--drivers/iio/adc/rtq6056.c61
-rw-r--r--drivers/iio/adc/rzg2l_adc.c454
-rw-r--r--drivers/iio/adc/rzn1-adc.c490
-rw-r--r--drivers/iio/adc/rzt2h_adc.c304
-rw-r--r--drivers/iio/adc/sc27xx_adc.c41
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c97
-rw-r--r--drivers/iio/adc/sophgo-cv1800b-adc.c227
-rw-r--r--drivers/iio/adc/spear_adc.c39
-rw-r--r--drivers/iio/adc/stm32-adc-core.c24
-rw-r--r--drivers/iio/adc/stm32-adc-core.h17
-rw-r--r--drivers/iio/adc/stm32-adc.c258
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c415
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c5
-rw-r--r--drivers/iio/adc/stmpe-adc.c6
-rw-r--r--drivers/iio/adc/sun20i-gpadc-iio.c44
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c18
-rw-r--r--drivers/iio/adc/ti-adc081c.c46
-rw-r--r--drivers/iio/adc/ti-adc0832.c11
-rw-r--r--drivers/iio/adc/ti-adc084s021.c26
-rw-r--r--drivers/iio/adc/ti-adc108s102.c35
-rw-r--r--drivers/iio/adc/ti-adc12138.c41
-rw-r--r--drivers/iio/adc/ti-adc128s052.c188
-rw-r--r--drivers/iio/adc/ti-adc161s626.c18
-rw-r--r--drivers/iio/adc/ti-ads1015.c200
-rw-r--r--drivers/iio/adc/ti-ads1100.c45
-rw-r--r--drivers/iio/adc/ti-ads1119.c825
-rw-r--r--drivers/iio/adc/ti-ads124s08.c17
-rw-r--r--drivers/iio/adc/ti-ads1298.c16
-rw-r--r--drivers/iio/adc/ti-ads131e08.c64
-rw-r--r--drivers/iio/adc/ti-ads7138.c749
-rw-r--r--drivers/iio/adc/ti-ads7924.c22
-rw-r--r--drivers/iio/adc/ti-ads7950.c17
-rw-r--r--drivers/iio/adc/ti-ads8344.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c75
-rw-r--r--drivers/iio/adc/ti-lmp92064.c14
-rw-r--r--drivers/iio/adc/ti-tlc4541.c19
-rw-r--r--drivers/iio/adc/ti-tsc2046.c113
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c21
-rw-r--r--drivers/iio/adc/twl4030-madc.c27
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c12
-rw-r--r--drivers/iio/adc/vf610_adc.c151
-rw-r--r--drivers/iio/adc/viperboard_adc.c4
-rw-r--r--drivers/iio/adc/xilinx-ams.c180
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c11
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/addac/ad74115.c43
-rw-r--r--drivers/iio/addac/ad74413r.c154
-rw-r--r--drivers/iio/addac/stx104.c2
-rw-r--r--drivers/iio/afe/iio-rescale.c6
-rw-r--r--drivers/iio/amplifiers/Kconfig1
-rw-r--r--drivers/iio/amplifiers/ad8366.c8
-rw-r--r--drivers/iio/amplifiers/ada4250.c59
-rw-r--r--drivers/iio/amplifiers/hmc425a.c8
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c4
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c306
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c248
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c4
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c2
-rw-r--r--drivers/iio/buffer/kfifo_buf.c1
-rw-r--r--drivers/iio/cdc/ad7150.c6
-rw-r--r--drivers/iio/cdc/ad7746.c6
-rw-r--r--drivers/iio/chemical/Kconfig44
-rw-r--r--drivers/iio/chemical/Makefile5
-rw-r--r--drivers/iio/chemical/ags02ma.c4
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c4
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c6
-rw-r--r--drivers/iio/chemical/atlas-sensor.c19
-rw-r--r--drivers/iio/chemical/bme680.h54
-rw-r--r--drivers/iio/chemical/bme680_core.c1114
-rw-r--r--drivers/iio/chemical/bme680_i2c.c9
-rw-r--r--drivers/iio/chemical/bme680_spi.c17
-rw-r--r--drivers/iio/chemical/ccs811.c87
-rw-r--r--drivers/iio/chemical/ens160.h10
-rw-r--r--drivers/iio/chemical/ens160_core.c376
-rw-r--r--drivers/iio/chemical/ens160_i2c.c62
-rw-r--r--drivers/iio/chemical/ens160_spi.c61
-rw-r--r--drivers/iio/chemical/mhz19b.c316
-rw-r--r--drivers/iio/chemical/pms7003.c9
-rw-r--r--drivers/iio/chemical/scd30_core.c82
-rw-r--r--drivers/iio/chemical/scd30_i2c.c4
-rw-r--r--drivers/iio/chemical/scd30_serial.c4
-rw-r--r--drivers/iio/chemical/scd4x.c17
-rw-r--r--drivers/iio/chemical/sen0322.c161
-rw-r--r--drivers/iio/chemical/sgp40.c11
-rw-r--r--drivers/iio/chemical/sps30.c8
-rw-r--r--drivers/iio/chemical/sps30_i2c.c4
-rw-r--r--drivers/iio/chemical/sps30_serial.c2
-rw-r--r--drivers/iio/chemical/sunrise_co2.c10
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig9
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile4
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_activity.c307
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c105
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c32
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h56
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c32
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c13
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c51
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c24
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c73
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c22
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c20
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c74
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c26
-rw-r--r--drivers/iio/dac/Kconfig143
-rw-r--r--drivers/iio/dac/Makefile10
-rw-r--r--drivers/iio/dac/ad3530r.c516
-rw-r--r--drivers/iio/dac/ad3552r-common.c294
-rw-r--r--drivers/iio/dac/ad3552r-hs.c884
-rw-r--r--drivers/iio/dac/ad3552r-hs.h27
-rw-r--r--drivers/iio/dac/ad3552r.c702
-rw-r--r--drivers/iio/dac/ad3552r.h232
-rw-r--r--drivers/iio/dac/ad5064.c10
-rw-r--r--drivers/iio/dac/ad5360.c4
-rw-r--r--drivers/iio/dac/ad5380.c93
-rw-r--r--drivers/iio/dac/ad5421.c4
-rw-r--r--drivers/iio/dac/ad5446-i2c.c102
-rw-r--r--drivers/iio/dac/ad5446-spi.c252
-rw-r--r--drivers/iio/dac/ad5446.c569
-rw-r--r--drivers/iio/dac/ad5446.h77
-rw-r--r--drivers/iio/dac/ad5449.c19
-rw-r--r--drivers/iio/dac/ad5504.c65
-rw-r--r--drivers/iio/dac/ad5592r-base.c160
-rw-r--r--drivers/iio/dac/ad5592r.c8
-rw-r--r--drivers/iio/dac/ad5593r.c10
-rw-r--r--drivers/iio/dac/ad5624r.h5
-rw-r--r--drivers/iio/dac/ad5624r_spi.c77
-rw-r--r--drivers/iio/dac/ad5686-spi.c10
-rw-r--r--drivers/iio/dac/ad5686.c66
-rw-r--r--drivers/iio/dac/ad5686.h6
-rw-r--r--drivers/iio/dac/ad5696-i2c.c12
-rw-r--r--drivers/iio/dac/ad5755.c39
-rw-r--r--drivers/iio/dac/ad5758.c2
-rw-r--r--drivers/iio/dac/ad5761.c113
-rw-r--r--drivers/iio/dac/ad5764.c4
-rw-r--r--drivers/iio/dac/ad5766.c10
-rw-r--r--drivers/iio/dac/ad5770r.c69
-rw-r--r--drivers/iio/dac/ad5791.c386
-rw-r--r--drivers/iio/dac/ad7293.c87
-rw-r--r--drivers/iio/dac/ad7303.c6
-rw-r--r--drivers/iio/dac/ad8460.c957
-rw-r--r--drivers/iio/dac/ad8801.c83
-rw-r--r--drivers/iio/dac/ad9739a.c469
-rw-r--r--drivers/iio/dac/adi-axi-dac.c1045
-rw-r--r--drivers/iio/dac/dpot-dac.c4
-rw-r--r--drivers/iio/dac/ds4424.c6
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c8
-rw-r--r--drivers/iio/dac/ltc1660.c4
-rw-r--r--drivers/iio/dac/ltc2632.c77
-rw-r--r--drivers/iio/dac/ltc2664.c736
-rw-r--r--drivers/iio/dac/ltc2688.c127
-rw-r--r--drivers/iio/dac/m62332.c2
-rw-r--r--drivers/iio/dac/max517.c6
-rw-r--r--drivers/iio/dac/max5522.c15
-rw-r--r--drivers/iio/dac/max5821.c38
-rw-r--r--drivers/iio/dac/mcp4725.c10
-rw-r--r--drivers/iio/dac/mcp4728.c53
-rw-r--r--drivers/iio/dac/mcp4821.c6
-rw-r--r--drivers/iio/dac/mcp4922.c49
-rw-r--r--drivers/iio/dac/rohm-bd79703.c246
-rw-r--r--drivers/iio/dac/stm32-dac-core.c9
-rw-r--r--drivers/iio/dac/stm32-dac.c25
-rw-r--r--drivers/iio/dac/ti-dac082s085.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c9
-rw-r--r--drivers/iio/dac/ti-dac7311.c10
-rw-r--r--drivers/iio/dac/ti-dac7612.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c29
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c119
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.h2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c29
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c32
-rw-r--r--drivers/iio/filter/admv8818.c244
-rw-r--r--drivers/iio/frequency/Kconfig34
-rw-r--r--drivers/iio/frequency/ad9523.c4
-rw-r--r--drivers/iio/frequency/adf4350.c155
-rw-r--r--drivers/iio/frequency/adf4371.c108
-rw-r--r--drivers/iio/frequency/adf4377.c41
-rw-r--r--drivers/iio/frequency/admfm2000.c24
-rw-r--r--drivers/iio/frequency/admv1013.c10
-rw-r--r--drivers/iio/frequency/admv1014.c6
-rw-r--r--drivers/iio/frequency/admv4420.c2
-rw-r--r--drivers/iio/frequency/adrf6780.c7
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/adis16080.c2
-rw-r--r--drivers/iio/gyro/adis16130.c2
-rw-r--r--drivers/iio/gyro/adis16136.c28
-rw-r--r--drivers/iio/gyro/adis16260.c23
-rw-r--r--drivers/iio/gyro/adxrs290.c16
-rw-r--r--drivers/iio/gyro/adxrs450.c7
-rw-r--r--drivers/iio/gyro/bmg160_core.c33
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c17
-rw-r--r--drivers/iio/gyro/bmg160_spi.c12
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c18
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c4
-rw-r--r--drivers/iio/gyro/fxas21002c_spi.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c12
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/gyro/itg3200_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c44
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c5
-rw-r--r--drivers/iio/gyro/ssp_gyro_sensor.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c6
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c6
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c6
-rw-r--r--drivers/iio/health/afe4403.c68
-rw-r--r--drivers/iio/health/afe4404.c68
-rw-r--r--drivers/iio/health/max30100.c50
-rw-r--r--drivers/iio/health/max30102.c18
-rw-r--r--drivers/iio/humidity/Kconfig11
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c13
-rw-r--r--drivers/iio/humidity/dht11.c11
-rw-r--r--drivers/iio/humidity/ens210.c339
-rw-r--r--drivers/iio/humidity/hdc100x.c82
-rw-r--r--drivers/iio/humidity/hdc2010.c14
-rw-r--r--drivers/iio/humidity/hdc3020.c459
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c8
-rw-r--r--drivers/iio/humidity/hts221.h2
-rw-r--r--drivers/iio/humidity/hts221_buffer.c3
-rw-r--r--drivers/iio/humidity/hts221_core.c95
-rw-r--r--drivers/iio/humidity/hts221_i2c.c8
-rw-r--r--drivers/iio/humidity/hts221_spi.c6
-rw-r--r--drivers/iio/humidity/htu21.c6
-rw-r--r--drivers/iio/humidity/si7005.c4
-rw-r--r--drivers/iio/humidity/si7020.c141
-rw-r--r--drivers/iio/iio_core.h4
-rw-r--r--drivers/iio/imu/Kconfig34
-rw-r--r--drivers/iio/imu/Makefile6
-rw-r--r--drivers/iio/imu/adis.c77
-rw-r--r--drivers/iio/imu/adis16400.c408
-rw-r--r--drivers/iio/imu/adis16460.c24
-rw-r--r--drivers/iio/imu/adis16475.c838
-rw-r--r--drivers/iio/imu/adis16480.c555
-rw-r--r--drivers/iio/imu/adis16550.c1147
-rw-r--r--drivers/iio/imu/adis_buffer.c78
-rw-r--r--drivers/iio/imu/adis_trigger.c39
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h2
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c67
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c15
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c13
-rw-r--r--drivers/iio/imu/bmi270/Kconfig33
-rw-r--r--drivers/iio/imu/bmi270/Makefile7
-rw-r--r--drivers/iio/imu/bmi270/bmi270.h25
-rw-r--r--drivers/iio/imu/bmi270/bmi270_core.c1664
-rw-r--r--drivers/iio/imu/bmi270/bmi270_i2c.c70
-rw-r--r--drivers/iio/imu/bmi270/bmi270_spi.c94
-rw-r--r--drivers/iio/imu/bmi323/bmi323.h2
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c261
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c5
-rw-r--r--drivers/iio/imu/bmi323/bmi323_spi.c5
-rw-r--r--drivers/iio/imu/bno055/bno055.c72
-rw-r--r--drivers/iio/imu/bno055/bno055_i2c.c4
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c4
-rw-r--r--drivers/iio/imu/fxos8700_core.c1
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h107
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c607
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c145
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h12
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c367
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c182
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c34
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c37
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c28
-rw-r--r--drivers/iio/imu/inv_icm45600/Kconfig70
-rw-r--r--drivers/iio/imu/inv_icm45600/Makefile16
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600.h385
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c782
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c558
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.h101
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_core.c988
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c791
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_i2c.c98
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_i3c.c79
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c108
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c17
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c56
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c616
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c22
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h42
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c27
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c21
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c95
-rw-r--r--drivers/iio/imu/kmx61.c49
-rw-r--r--drivers/iio/imu/smi240.c620
-rw-r--r--drivers/iio/imu/smi330/Kconfig33
-rw-r--r--drivers/iio/imu/smi330/Makefile7
-rw-r--r--drivers/iio/imu/smi330/smi330.h25
-rw-r--r--drivers/iio/imu/smi330/smi330_core.c918
-rw-r--r--drivers/iio/imu/smi330/smi330_i2c.c133
-rw-r--r--drivers/iio/imu/smi330/smi330_spi.c85
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig18
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h46
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c77
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c180
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c8
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c75
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c6
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c4
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c10
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c8
-rw-r--r--drivers/iio/industrialio-acpi.c125
-rw-r--r--drivers/iio/industrialio-backend.c807
-rw-r--r--drivers/iio/industrialio-buffer.c781
-rw-r--r--drivers/iio/industrialio-core.c209
-rw-r--r--drivers/iio/industrialio-event.c15
-rw-r--r--drivers/iio/industrialio-gts-helper.c391
-rw-r--r--drivers/iio/industrialio-trigger.c100
-rw-r--r--drivers/iio/inkern.c402
-rw-r--r--drivers/iio/light/Kconfig119
-rw-r--r--drivers/iio/light/Makefile9
-rw-r--r--drivers/iio/light/acpi-als.c21
-rw-r--r--drivers/iio/light/adjd_s311.c17
-rw-r--r--drivers/iio/light/adux1020.c33
-rw-r--r--drivers/iio/light/al3000a.c210
-rw-r--r--drivers/iio/light/al3010.c106
-rw-r--r--drivers/iio/light/al3320a.c118
-rw-r--r--drivers/iio/light/apds9160.c1592
-rw-r--r--drivers/iio/light/apds9300.c25
-rw-r--r--drivers/iio/light/apds9306.c1359
-rw-r--r--drivers/iio/light/apds9960.c75
-rw-r--r--drivers/iio/light/as73211.c70
-rw-r--r--drivers/iio/light/bh1745.c901
-rw-r--r--drivers/iio/light/bh1750.c24
-rw-r--r--drivers/iio/light/bh1780.c7
-rw-r--r--drivers/iio/light/cm32181.c5
-rw-r--r--drivers/iio/light/cm3232.c42
-rw-r--r--drivers/iio/light/cm3323.c6
-rw-r--r--drivers/iio/light/cm3605.c4
-rw-r--r--drivers/iio/light/cm36651.c6
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c2
-rw-r--r--drivers/iio/light/gp2ap002.c12
-rw-r--r--drivers/iio/light/gp2ap020a00f.c23
-rw-r--r--drivers/iio/light/hid-sensor-als.c17
-rw-r--r--drivers/iio/light/hid-sensor-prox.c230
-rw-r--r--drivers/iio/light/iqs621-als.c6
-rw-r--r--drivers/iio/light/isl29018.c46
-rw-r--r--drivers/iio/light/isl29028.c21
-rw-r--r--drivers/iio/light/isl29125.c26
-rw-r--r--drivers/iio/light/isl76682.c2
-rw-r--r--drivers/iio/light/jsa1212.c7
-rw-r--r--drivers/iio/light/lm3533-als.c8
-rw-r--r--drivers/iio/light/ltr390.c780
-rw-r--r--drivers/iio/light/ltr501.c225
-rw-r--r--drivers/iio/light/ltrf216a.c61
-rw-r--r--drivers/iio/light/lv0104cs.c2
-rw-r--r--drivers/iio/light/max44000.c20
-rw-r--r--drivers/iio/light/max44009.c4
-rw-r--r--drivers/iio/light/noa1305.c171
-rw-r--r--drivers/iio/light/opt3001.c200
-rw-r--r--drivers/iio/light/opt4001.c5
-rw-r--r--drivers/iio/light/opt4060.c1341
-rw-r--r--drivers/iio/light/pa12203001.c17
-rw-r--r--drivers/iio/light/rohm-bu27008.c1635
-rw-r--r--drivers/iio/light/rohm-bu27034.c437
-rw-r--r--drivers/iio/light/rpr0521.c97
-rw-r--r--drivers/iio/light/si1133.c4
-rw-r--r--drivers/iio/light/si1145.c37
-rw-r--r--drivers/iio/light/st_uvis25.h5
-rw-r--r--drivers/iio/light/st_uvis25_core.c30
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c6
-rw-r--r--drivers/iio/light/st_uvis25_spi.c6
-rw-r--r--drivers/iio/light/stk3310.c66
-rw-r--r--drivers/iio/light/tcs3414.c29
-rw-r--r--drivers/iio/light/tcs3472.c30
-rw-r--r--drivers/iio/light/tsl2563.c6
-rw-r--r--drivers/iio/light/tsl2583.c16
-rw-r--r--drivers/iio/light/tsl2591.c8
-rw-r--r--drivers/iio/light/tsl2772.c10
-rw-r--r--drivers/iio/light/tsl4531.c2
-rw-r--r--drivers/iio/light/us5182d.c22
-rw-r--r--drivers/iio/light/vcnl4000.c109
-rw-r--r--drivers/iio/light/vcnl4035.c63
-rw-r--r--drivers/iio/light/veml3235.c547
-rw-r--r--drivers/iio/light/veml6030.c908
-rw-r--r--drivers/iio/light/veml6040.c280
-rw-r--r--drivers/iio/light/veml6046x00.c1030
-rw-r--r--drivers/iio/light/veml6070.c203
-rw-r--r--drivers/iio/light/veml6075.c10
-rw-r--r--drivers/iio/light/vl6180.c259
-rw-r--r--drivers/iio/light/zopt2201.c46
-rw-r--r--drivers/iio/magnetometer/Kconfig43
-rw-r--r--drivers/iio/magnetometer/Makefile5
-rw-r--r--drivers/iio/magnetometer/af8133j.c15
-rw-r--r--drivers/iio/magnetometer/ak8974.c38
-rw-r--r--drivers/iio/magnetometer/ak8975.c91
-rw-r--r--drivers/iio/magnetometer/als31300.c490
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c47
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c19
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c13
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c10
-rw-r--r--drivers/iio/magnetometer/hmc5843.h4
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c8
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c3
-rw-r--r--drivers/iio/magnetometer/mag3110.c169
-rw-r--r--drivers/iio/magnetometer/mmc35240.c17
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c23
-rw-r--r--drivers/iio/magnetometer/rm3100-i2c.c2
-rw-r--r--drivers/iio/magnetometer/rm3100-spi.c3
-rw-r--r--drivers/iio/magnetometer/si7210.c446
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c6
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c6
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c6
-rw-r--r--drivers/iio/magnetometer/tlv493d.c526
-rw-r--r--drivers/iio/magnetometer/tmag5273.c11
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c27
-rw-r--r--drivers/iio/multiplexer/iio-mux.c87
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c12
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c14
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c14
-rw-r--r--drivers/iio/position/iqs624-pos.c2
-rw-r--r--drivers/iio/potentiometer/ad5272.c4
-rw-r--r--drivers/iio/potentiometer/ds1803.c5
-rw-r--r--drivers/iio/potentiometer/max5432.c2
-rw-r--r--drivers/iio/potentiometer/max5487.c2
-rw-r--r--drivers/iio/potentiometer/mcp4018.c4
-rw-r--r--drivers/iio/potentiometer/mcp41010.c4
-rw-r--r--drivers/iio/potentiometer/mcp4131.c5
-rw-r--r--drivers/iio/potentiometer/mcp4531.c4
-rw-r--r--drivers/iio/potentiometer/tpl0102.c2
-rw-r--r--drivers/iio/potentiostat/lmp91000.c12
-rw-r--r--drivers/iio/pressure/Kconfig27
-rw-r--r--drivers/iio/pressure/Makefile9
-rw-r--r--drivers/iio/pressure/abp060mg.c6
-rw-r--r--drivers/iio/pressure/adp810.c225
-rw-r--r--drivers/iio/pressure/bmp280-core.c2088
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c12
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c55
-rw-r--r--drivers/iio/pressure/bmp280-spi.c38
-rw-r--r--drivers/iio/pressure/bmp280.h148
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c2
-rw-r--r--drivers/iio/pressure/dlhl60d.c67
-rw-r--r--drivers/iio/pressure/dps310.c144
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c17
-rw-r--r--drivers/iio/pressure/hp03.c6
-rw-r--r--drivers/iio/pressure/hp206c.c6
-rw-r--r--drivers/iio/pressure/hsc030pa.c8
-rw-r--r--drivers/iio/pressure/hsc030pa.h2
-rw-r--r--drivers/iio/pressure/hsc030pa_i2c.c6
-rw-r--r--drivers/iio/pressure/hsc030pa_spi.c13
-rw-r--r--drivers/iio/pressure/icp10100.c18
-rw-r--r--drivers/iio/pressure/mpl115.c4
-rw-r--r--drivers/iio/pressure/mpl115_i2c.c4
-rw-r--r--drivers/iio/pressure/mpl115_spi.c4
-rw-r--r--drivers/iio/pressure/mpl3115.c647
-rw-r--r--drivers/iio/pressure/mprls0025pa.c4
-rw-r--r--drivers/iio/pressure/mprls0025pa.h17
-rw-r--r--drivers/iio/pressure/mprls0025pa_i2c.c11
-rw-r--r--drivers/iio/pressure/mprls0025pa_spi.c6
-rw-r--r--drivers/iio/pressure/ms5611_core.c12
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/ms5611_spi.c5
-rw-r--r--drivers/iio/pressure/ms5637.c6
-rw-r--r--drivers/iio/pressure/rohm-bm1390.c97
-rw-r--r--drivers/iio/pressure/sdp500.c156
-rw-r--r--drivers/iio/pressure/st_pressure_core.c8
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c8
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c6
-rw-r--r--drivers/iio/pressure/t5403.c2
-rw-r--r--drivers/iio/pressure/zpa2326.c49
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c7
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c6
-rw-r--r--drivers/iio/proximity/Kconfig36
-rw-r--r--drivers/iio/proximity/Makefile3
-rw-r--r--drivers/iio/proximity/as3935.c10
-rw-r--r--drivers/iio/proximity/aw96103.c846
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c25
-rw-r--r--drivers/iio/proximity/d3323aa.c815
-rw-r--r--drivers/iio/proximity/hx9023s.c1222
-rw-r--r--drivers/iio/proximity/irsd200.c48
-rw-r--r--drivers/iio/proximity/isl29501.c22
-rw-r--r--drivers/iio/proximity/mb1232.c19
-rw-r--r--drivers/iio/proximity/ping.c6
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c27
-rw-r--r--drivers/iio/proximity/rfd77402.c2
-rw-r--r--drivers/iio/proximity/srf04.c12
-rw-r--r--drivers/iio/proximity/srf08.c22
-rw-r--r--drivers/iio/proximity/sx9310.c27
-rw-r--r--drivers/iio/proximity/sx9324.c48
-rw-r--r--drivers/iio/proximity/sx9360.c27
-rw-r--r--drivers/iio/proximity/sx9500.c66
-rw-r--r--drivers/iio/proximity/sx_common.c52
-rw-r--r--drivers/iio/proximity/sx_common.h8
-rw-r--r--drivers/iio/proximity/vcnl3020.c20
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c189
-rw-r--r--drivers/iio/resolver/Kconfig3
-rw-r--r--drivers/iio/resolver/ad2s1200.c5
-rw-r--r--drivers/iio/resolver/ad2s1210.c54
-rw-r--r--drivers/iio/resolver/ad2s90.c4
-rw-r--r--drivers/iio/temperature/Kconfig10
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c13
-rw-r--r--drivers/iio/temperature/ltc2983.c386
-rw-r--r--drivers/iio/temperature/max30208.c1
-rw-r--r--drivers/iio/temperature/max31856.c2
-rw-r--r--drivers/iio/temperature/max31865.c2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c61
-rw-r--r--drivers/iio/temperature/mcp9600.c505
-rw-r--r--drivers/iio/temperature/mlx90614.c6
-rw-r--r--drivers/iio/temperature/mlx90632.c11
-rw-r--r--drivers/iio/temperature/mlx90635.c15
-rw-r--r--drivers/iio/temperature/tmp006.c129
-rw-r--r--drivers/iio/temperature/tmp007.c6
-rw-r--r--drivers/iio/temperature/tsys01.c8
-rw-r--r--drivers/iio/temperature/tsys02d.c6
-rw-r--r--drivers/iio/test/Kconfig14
-rw-r--r--drivers/iio/test/Makefile1
-rw-r--r--drivers/iio/test/iio-test-format.c2
-rw-r--r--drivers/iio/test/iio-test-gts.c12
-rw-r--r--drivers/iio/test/iio-test-multiply.c212
-rw-r--r--drivers/iio/test/iio-test-rescale.c8
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c4
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c2
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c78
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c110
-rw-r--r--drivers/infiniband/Kconfig3
-rw-r--r--drivers/infiniband/core/Makefile4
-rw-r--r--drivers/infiniband/core/addr.c83
-rw-r--r--drivers/infiniband/core/agent.c35
-rw-r--r--drivers/infiniband/core/cache.c63
-rw-r--r--drivers/infiniband/core/cm.c300
-rw-r--r--drivers/infiniband/core/cm_trace.h2
-rw-r--r--drivers/infiniband/core/cma.c207
-rw-r--r--drivers/infiniband/core/cma_priv.h4
-rw-r--r--drivers/infiniband/core/cma_trace.h6
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/counters.c54
-rw-r--r--drivers/infiniband/core/cq.c12
-rw-r--r--drivers/infiniband/core/device.c343
-rw-r--r--drivers/infiniband/core/iwcm.c58
-rw-r--r--drivers/infiniband/core/mad.c518
-rw-r--r--drivers/infiniband/core/mad_priv.h76
-rw-r--r--drivers/infiniband/core/mad_rmpp.c43
-rw-r--r--drivers/infiniband/core/netlink.c1
-rw-r--r--drivers/infiniband/core/nldev.c341
-rw-r--r--drivers/infiniband/core/rdma_core.c41
-rw-r--r--drivers/infiniband/core/rdma_core.h1
-rw-r--r--drivers/infiniband/core/restrack.c6
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c30
-rw-r--r--drivers/infiniband/core/sa_query.c285
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucaps.c267
-rw-r--r--drivers/infiniband/core/ucma.c148
-rw-r--r--drivers/infiniband/core/ud_header.c83
-rw-r--r--drivers/infiniband/core/umem.c44
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c68
-rw-r--r--drivers/infiniband/core/umem_odp.c280
-rw-r--r--drivers/infiniband/core/user_mad.c29
-rw-r--r--drivers/infiniband/core/uverbs.h29
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c208
-rw-r--r--drivers/infiniband/core/uverbs_main.c52
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c42
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c88
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c4
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dmah.c145
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c174
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c2
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c1
-rw-r--r--drivers/infiniband/core/verbs.c105
-rw-r--r--drivers/infiniband/hw/Makefile5
-rw-r--r--drivers/infiniband/hw/bng_re/Kconfig10
-rw-r--r--drivers/infiniband/hw/bng_re/Makefile8
-rw-r--r--drivers/infiniband/hw/bng_re/bng_debugfs.c39
-rw-r--r--drivers/infiniband/hw/bng_re/bng_debugfs.h12
-rw-r--r--drivers/infiniband/hw/bng_re/bng_dev.c534
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.c767
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.h211
-rw-r--r--drivers/infiniband/hw/bng_re/bng_re.h85
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.c279
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.h215
-rw-r--r--drivers/infiniband/hw/bng_re/bng_sp.c131
-rw-r--r--drivers/infiniband/hw/bng_re/bng_sp.h47
-rw-r--r--drivers/infiniband/hw/bng_re/bng_tlv.h128
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile3
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h123
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c524
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.h55
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c206
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h26
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c720
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h48
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c1439
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c315
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h69
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c86
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h80
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c311
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h30
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h171
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h43
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c68
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c40
-rw-r--r--drivers/infiniband/hw/efa/efa.h17
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h83
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c46
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c60
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h16
-rw-r--r--drivers/infiniband/hw/efa/efa_io_defs.h106
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c62
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c155
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h17
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c78
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c52
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c65
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c93
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h135
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c67
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c301
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c685
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h176
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c96
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c58
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h2
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c28
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h9
-rw-r--r--drivers/infiniband/hw/hfi1/device.c4
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c7
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c10
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h16
-rw-r--r--drivers/infiniband/hw/hfi1/init.c11
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c31
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c6
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h1
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c2
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h4
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.c4
-rw-r--r--drivers/infiniband/hw/hfi1/pin_system.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c10
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h1
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c20
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c25
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h1
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c12
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c13
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h4
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h4
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c2
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c4
-rw-r--r--drivers/infiniband/hw/hns/Kconfig20
-rw-r--r--drivers/infiniband/hw/hns/Makefile12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_bond.c1012
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_bond.h95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c66
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_debugfs.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h84
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c147
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c853
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h53
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c231
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c152
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c130
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_trace.h216
-rw-r--r--drivers/infiniband/hw/ionic/Kconfig15
-rw-r--r--drivers/infiniband/hw/ionic/Makefile9
-rw-r--r--drivers/infiniband/hw/ionic/ionic_admin.c1229
-rw-r--r--drivers/infiniband/hw/ionic/ionic_controlpath.c2679
-rw-r--r--drivers/infiniband/hw/ionic/ionic_datapath.c1399
-rw-r--r--drivers/infiniband/hw/ionic/ionic_fw.h1029
-rw-r--r--drivers/infiniband/hw/ionic/ionic_hw_stats.c484
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.c440
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.h517
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.c111
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.h66
-rw-r--r--drivers/infiniband/hw/ionic/ionic_pgtbl.c143
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.c52
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.h234
-rw-r--r--drivers/infiniband/hw/ionic/ionic_res.h154
-rw-r--r--drivers/infiniband/hw/irdma/Kconfig8
-rw-r--r--drivers/infiniband/hw/irdma/Makefile4
-rw-r--r--drivers/infiniband/hw/irdma/cm.c9
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c1537
-rw-r--r--drivers/infiniband/hw/irdma/defs.h264
-rw-r--r--drivers/infiniband/hw/irdma/hmc.c18
-rw-r--r--drivers/infiniband/hw/irdma/hmc.h19
-rw-r--r--drivers/infiniband/hw/irdma/hw.c368
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h5
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_if.c347
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.c170
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.h32
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_if.c236
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h22
-rw-r--r--drivers/infiniband/hw/irdma/main.c324
-rw-r--r--drivers/infiniband/hw/irdma/main.h45
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h12
-rw-r--r--drivers/infiniband/hw/irdma/pble.c30
-rw-r--r--drivers/infiniband/hw/irdma/protos.h5
-rw-r--r--drivers/infiniband/hw/irdma/puda.c39
-rw-r--r--drivers/infiniband/hw/irdma/puda.h9
-rw-r--r--drivers/infiniband/hw/irdma/type.h232
-rw-r--r--drivers/infiniband/hw/irdma/uda_d.h5
-rw-r--r--drivers/infiniband/hw/irdma/uk.c370
-rw-r--r--drivers/infiniband/hw/irdma/user.h271
-rw-r--r--drivers/infiniband/hw/irdma/utils.c288
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c899
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h55
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.c618
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.h176
-rw-r--r--drivers/infiniband/hw/mana/Makefile2
-rw-r--r--drivers/infiniband/hw/mana/ah.c58
-rw-r--r--drivers/infiniband/hw/mana/counters.c179
-rw-r--r--drivers/infiniband/hw/mana/counters.h62
-rw-r--r--drivers/infiniband/hw/mana/cq.c255
-rw-r--r--drivers/infiniband/hw/mana/device.c224
-rw-r--r--drivers/infiniband/hw/mana/main.c306
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h385
-rw-r--r--drivers/infiniband/hw/mana/mr.c137
-rw-r--r--drivers/infiniband/hw/mana/qp.c445
-rw-r--r--drivers/infiniband/hw/mana/shadow_queue.h115
-rw-r--r--drivers/infiniband/hw/mana/wr.c168
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c18
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c8
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h21
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c290
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c15
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile4
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c13
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c33
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h4
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c233
-rw-r--r--drivers/infiniband/hw/mlx5/counters.h2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c67
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.c227
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.h23
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c163
-rw-r--r--drivers/infiniband/hw/mlx5/devx.h9
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c2
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.c54
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.h23
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c784
-rw-r--r--drivers/infiniband/hw/mlx5/fs.h21
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c15
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c99
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c77
-rw-r--r--drivers/infiniband/hw/mlx5/main.c992
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c198
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h198
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c626
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c640
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c95
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h1
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c43
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c9
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c17
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c99
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c501
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c22
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h5
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c9
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h5
-rw-r--r--drivers/infiniband/hw/qib/Kconfig17
-rw-r--r--drivers/infiniband/hw/qib/Makefile17
-rw-r--r--drivers/infiniband/hw/qib/qib.h1493
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h149
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h798
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c274
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.h45
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c906
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c804
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c271
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2401
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c548
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3533
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4596
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c8475
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1785
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c240
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2449
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h300
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c598
-rw-r--r--drivers/infiniband/hw/qib/qib_pio_copy.c64
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c454
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c549
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h188
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2131
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c314
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1445
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c999
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c737
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c502
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c566
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c521
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c583
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c137
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1470
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1705
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h402
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c62
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c150
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c87
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c8
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c68
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c28
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h5
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c9
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c11
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c28
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rvt.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c2
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig5
-rw-r--r--drivers/infiniband/sw/rxe/Makefile2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c63
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h41
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h72
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c72
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c123
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c577
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c63
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c48
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c63
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h51
-rw-r--r--drivers/infiniband/sw/siw/Kconfig5
-rw-r--r--drivers/infiniband/sw/siw/siw.h54
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c86
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c83
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c28
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c54
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c31
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c73
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c82
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h17
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c65
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c182
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c16
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c5
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c92
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c55
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c96
-rw-r--r--drivers/input/Kconfig14
-rw-r--r--drivers/input/Makefile3
-rw-r--r--drivers/input/evbug.c100
-rw-r--r--drivers/input/evdev.c32
-rw-r--r--drivers/input/ff-core.c96
-rw-r--r--drivers/input/ff-memless.c25
-rw-r--r--drivers/input/gameport/emu10k1-gp.c2
-rw-r--r--drivers/input/gameport/fm801-gp.c2
-rw-r--r--drivers/input/gameport/gameport.c14
-rw-r--r--drivers/input/gameport/ns558.c4
-rw-r--r--drivers/input/input-compat.c30
-rw-r--r--drivers/input/input-compat.h3
-rw-r--r--drivers/input/input-mt.c51
-rw-r--r--drivers/input/input-poller.c5
-rw-r--r--drivers/input/input.c767
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/joystick/a3d.c2
-rw-r--r--drivers/input/joystick/adafruit-seesaw.c23
-rw-r--r--drivers/input/joystick/adc-joystick.c149
-rw-r--r--drivers/input/joystick/adi.c2
-rw-r--r--drivers/input/joystick/analog.c3
-rw-r--r--drivers/input/joystick/as5011.c4
-rw-r--r--drivers/input/joystick/cobra.c2
-rw-r--r--drivers/input/joystick/db9.c37
-rw-r--r--drivers/input/joystick/fsia6b.c2
-rw-r--r--drivers/input/joystick/gamecon.c29
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/grip.c3
-rw-r--r--drivers/input/joystick/grip_mp.c3
-rw-r--r--drivers/input/joystick/guillemot.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c48
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c3
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c60
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c36
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c13
-rw-r--r--drivers/input/joystick/interact.c2
-rw-r--r--drivers/input/joystick/magellan.c4
-rw-r--r--drivers/input/joystick/maplecontrol.c2
-rw-r--r--drivers/input/joystick/n64joy.c39
-rw-r--r--drivers/input/joystick/psxpad-spi.c6
-rw-r--r--drivers/input/joystick/qwiic-joystick.c4
-rw-r--r--drivers/input/joystick/sidewinder.c5
-rw-r--r--drivers/input/joystick/spaceball.c4
-rw-r--r--drivers/input/joystick/spaceorb.c2
-rw-r--r--drivers/input/joystick/stinger.c2
-rw-r--r--drivers/input/joystick/tmdc.c3
-rw-r--r--drivers/input/joystick/turbografx.c29
-rw-r--r--drivers/input/joystick/twidjoy.c2
-rw-r--r--drivers/input/joystick/walkera0701.c4
-rw-r--r--drivers/input/joystick/warrior.c2
-rw-r--r--drivers/input/joystick/xpad.c321
-rw-r--r--drivers/input/joystick/zhenhua.c2
-rw-r--r--drivers/input/keyboard/Kconfig77
-rw-r--r--drivers/input/keyboard/Makefile7
-rw-r--r--drivers/input/keyboard/adc-keys.c5
-rw-r--r--drivers/input/keyboard/adp5520-keys.c2
-rw-r--r--drivers/input/keyboard/adp5585-keys.c371
-rw-r--r--drivers/input/keyboard/adp5588-keys.c116
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1065
-rw-r--r--drivers/input/keyboard/applespi.c75
-rw-r--r--drivers/input/keyboard/atkbd.c65
-rw-r--r--drivers/input/keyboard/cap11xx.c137
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c18
-rw-r--r--drivers/input/keyboard/cypress-sf.c4
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c5
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c84
-rw-r--r--drivers/input/keyboard/gpio_keys.c72
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c16
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/input/keyboard/imx-sm-bbm-key.c225
-rw-r--r--drivers/input/keyboard/imx_keypad.c32
-rw-r--r--drivers/input/keyboard/imx_sc_key.c2
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c17
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c9
-rw-r--r--drivers/input/keyboard/lkkbd.c2
-rw-r--r--drivers/input/keyboard/lm8323.c54
-rw-r--r--drivers/input/keyboard/lm8333.c2
-rw-r--r--drivers/input/keyboard/locomokbd.c9
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c21
-rw-r--r--drivers/input/keyboard/maple_keyb.c11
-rw-r--r--drivers/input/keyboard/matrix_keypad.c384
-rw-r--r--drivers/input/keyboard/max7359_keypad.c2
-rw-r--r--drivers/input/keyboard/max7360-keypad.c308
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c268
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c47
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c19
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c43
-rw-r--r--drivers/input/keyboard/newtonkbd.c2
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c378
-rw-r--r--drivers/input/keyboard/omap-keypad.c39
-rw-r--r--drivers/input/keyboard/omap4-keypad.c6
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c8
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c546
-rw-r--r--drivers/input/keyboard/qt1050.c24
-rw-r--r--drivers/input/keyboard/qt1070.c4
-rw-r--r--drivers/input/keyboard/qt2160.c2
-rw-r--r--drivers/input/keyboard/samsung-keypad.c137
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c54
-rw-r--r--drivers/input/keyboard/spear-keyboard.c96
-rw-r--r--drivers/input/keyboard/st-keyscan.c19
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c3
-rw-r--r--drivers/input/keyboard/stowaway.c2
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c8
-rw-r--r--drivers/input/keyboard/sunkbd.c7
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c3
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c311
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c13
-rw-r--r--drivers/input/keyboard/tegra-kbc.c123
-rw-r--r--drivers/input/keyboard/tm2-touchkey.c4
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c35
-rw-r--r--drivers/input/keyboard/xtkbd.c2
-rw-r--r--drivers/input/matrix-keymap.c26
-rw-r--r--drivers/input/misc/88pm80x_onkey.c4
-rw-r--r--drivers/input/misc/88pm886-onkey.c98
-rw-r--r--drivers/input/misc/Kconfig84
-rw-r--r--drivers/input/misc/Makefile8
-rw-r--r--drivers/input/misc/ad714x-i2c.c10
-rw-r--r--drivers/input/misc/ad714x.c13
-rw-r--r--drivers/input/misc/adxl34x-i2c.c11
-rw-r--r--drivers/input/misc/adxl34x-spi.c9
-rw-r--r--drivers/input/misc/adxl34x.c160
-rw-r--r--drivers/input/misc/adxl34x.h2
-rw-r--r--drivers/input/misc/apanel.c2
-rw-r--r--drivers/input/misc/arizona-haptics.c14
-rw-r--r--drivers/input/misc/ati_remote2.c107
-rw-r--r--drivers/input/misc/atmel_captouch.c2
-rw-r--r--drivers/input/misc/aw86927.c846
-rw-r--r--drivers/input/misc/bma150.c6
-rw-r--r--drivers/input/misc/cm109.c167
-rw-r--r--drivers/input/misc/cma3000_d0x.c19
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c4
-rw-r--r--drivers/input/misc/cs40l50-vibra.c558
-rw-r--r--drivers/input/misc/da7280.c27
-rw-r--r--drivers/input/misc/da9052_onkey.c4
-rw-r--r--drivers/input/misc/da9055_onkey.c4
-rw-r--r--drivers/input/misc/drv260x.c52
-rw-r--r--drivers/input/misc/drv2665.c48
-rw-r--r--drivers/input/misc/drv2667.c48
-rw-r--r--drivers/input/misc/gpio-beeper.c2
-rw-r--r--drivers/input/misc/hisi_powerkey.c2
-rw-r--r--drivers/input/misc/ibm-panel.c5
-rw-r--r--drivers/input/misc/ideapad_slidebar.c30
-rw-r--r--drivers/input/misc/ims-pcu.c211
-rw-r--r--drivers/input/misc/iqs269a.c62
-rw-r--r--drivers/input/misc/iqs626a.c24
-rw-r--r--drivers/input/misc/iqs7222.c92
-rw-r--r--drivers/input/misc/kxtj9.c20
-rw-r--r--drivers/input/misc/m68kspkr.c2
-rw-r--r--drivers/input/misc/max7360-rotary.c192
-rw-r--r--drivers/input/misc/max77693-haptic.c57
-rw-r--r--drivers/input/misc/max8997_haptic.c109
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c3
-rw-r--r--drivers/input/misc/mma8450.c20
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c48
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c2
-rw-r--r--drivers/input/misc/pcap_keys.c4
-rw-r--r--drivers/input/misc/pcf50633-input.c113
-rw-r--r--drivers/input/misc/pcf8574_keypad.c2
-rw-r--r--drivers/input/misc/pcspkr.c2
-rw-r--r--drivers/input/misc/pf1550-onkey.c197
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c18
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c92
-rw-r--r--drivers/input/misc/powermate.c13
-rw-r--r--drivers/input/misc/pwm-beeper.c12
-rw-r--r--drivers/input/misc/qnap-mcu-input.c153
-rw-r--r--drivers/input/misc/regulator-haptic.c27
-rw-r--r--drivers/input/misc/rotary_encoder.c23
-rw-r--r--drivers/input/misc/sgi_btns.c1
-rw-r--r--drivers/input/misc/soc_button_array.c5
-rw-r--r--drivers/input/misc/sparcspkr.c81
-rw-r--r--drivers/input/misc/tps65219-pwrbutton.c2
-rw-r--r--drivers/input/misc/tps6594-pwrbutton.c126
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c4
-rw-r--r--drivers/input/misc/twl4030-vibra.c11
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/misc/uinput.c18
-rw-r--r--drivers/input/misc/wistron_btns.c10
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/misc/yealink.c89
-rw-r--r--drivers/input/mouse/alps.c68
-rw-r--r--drivers/input/mouse/amimouse.c2
-rw-r--r--drivers/input/mouse/appletouch.c2
-rw-r--r--drivers/input/mouse/bcm5974.c37
-rw-r--r--drivers/input/mouse/byd.c9
-rw-r--r--drivers/input/mouse/cyapa.c20
-rw-r--r--drivers/input/mouse/cyapa_gen3.c2
-rw-r--r--drivers/input/mouse/cyapa_gen5.c2
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/cypress_ps2.c218
-rw-r--r--drivers/input/mouse/cypress_ps2.h6
-rw-r--r--drivers/input/mouse/elan_i2c_core.c235
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c16
-rw-r--r--drivers/input/mouse/elantech.c33
-rw-r--r--drivers/input/mouse/focaltech.c3
-rw-r--r--drivers/input/mouse/hgpk.c2
-rw-r--r--drivers/input/mouse/lifebook.c6
-rw-r--r--drivers/input/mouse/maplemouse.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/input/mouse/psmouse-smbus.c28
-rw-r--r--drivers/input/mouse/sentelic.c2
-rw-r--r--drivers/input/mouse/sermouse.c2
-rw-r--r--drivers/input/mouse/synaptics.c74
-rw-r--r--drivers/input/mouse/synaptics.h3
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/mouse/vmmouse.c76
-rw-r--r--drivers/input/mouse/vsxxxaa.c2
-rw-r--r--drivers/input/rmi4/Kconfig15
-rw-r--r--drivers/input/rmi4/Makefile2
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h3
-rw-r--r--drivers/input/rmi4/rmi_bus.c13
-rw-r--r--drivers/input/rmi4/rmi_bus.h2
-rw-r--r--drivers/input/rmi4/rmi_driver.c3
-rw-r--r--drivers/input/rmi4/rmi_driver.h4
-rw-r--r--drivers/input/rmi4/rmi_f01.c2
-rw-r--r--drivers/input/rmi4/rmi_f03.c4
-rw-r--r--drivers/input/rmi4/rmi_f12.c43
-rw-r--r--drivers/input/rmi4/rmi_f1a.c143
-rw-r--r--drivers/input/rmi4/rmi_f21.c179
-rw-r--r--drivers/input/rmi4/rmi_f34.c174
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c2
-rw-r--r--drivers/input/rmi4/rmi_f54.c2
-rw-r--r--drivers/input/rmi4/rmi_i2c.c2
-rw-r--r--drivers/input/rmi4/rmi_smbus.c2
-rw-r--r--drivers/input/serio/Kconfig4
-rw-r--r--drivers/input/serio/altera_ps2.c4
-rw-r--r--drivers/input/serio/ambakmi.c4
-rw-r--r--drivers/input/serio/ams_delta_serio.c2
-rw-r--r--drivers/input/serio/apbps2.c4
-rw-r--r--drivers/input/serio/arc_ps2.c4
-rw-r--r--drivers/input/serio/ct82c710.c4
-rw-r--r--drivers/input/serio/gscps2.c126
-rw-r--r--drivers/input/serio/hil_mlc.c3
-rw-r--r--drivers/input/serio/hp_sdc.c3
-rw-r--r--drivers/input/serio/hyperv-keyboard.c42
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h209
-rw-r--r--drivers/input/serio/i8042-sparcio.h16
-rw-r--r--drivers/input/serio/i8042.c361
-rw-r--r--drivers/input/serio/ioc3kbd.c9
-rw-r--r--drivers/input/serio/libps2.c29
-rw-r--r--drivers/input/serio/maceps2.c4
-rw-r--r--drivers/input/serio/olpc_apsp.c6
-rw-r--r--drivers/input/serio/parkbd.c3
-rw-r--r--drivers/input/serio/pcips2.c4
-rw-r--r--drivers/input/serio/ps2-gpio.c18
-rw-r--r--drivers/input/serio/ps2mult.c27
-rw-r--r--drivers/input/serio/q40kbd.c16
-rw-r--r--drivers/input/serio/rpckbd.c4
-rw-r--r--drivers/input/serio/sa1111ps2.c12
-rw-r--r--drivers/input/serio/serio.c172
-rw-r--r--drivers/input/serio/serio_raw.c127
-rw-r--r--drivers/input/serio/serport.c31
-rw-r--r--drivers/input/serio/sun4i-ps2.c14
-rw-r--r--drivers/input/serio/userio.c142
-rw-r--r--drivers/input/serio/xilinx_ps2.c21
-rw-r--r--drivers/input/sparse-keymap.c4
-rw-r--r--drivers/input/tablet/acecad.c2
-rw-r--r--drivers/input/tablet/aiptek.c4
-rw-r--r--drivers/input/tablet/hanwang.c2
-rw-r--r--drivers/input/tablet/kbtab.c4
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c95
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/tests/input_test.c3
-rw-r--r--drivers/input/touch-overlay.c278
-rw-r--r--drivers/input/touchscreen.c1
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c20
-rw-r--r--drivers/input/touchscreen/Kconfig78
-rw-r--r--drivers/input/touchscreen/Makefile9
-rw-r--r--drivers/input/touchscreen/ad7877.c4
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c4
-rw-r--r--drivers/input/touchscreen/ad7879.c14
-rw-r--r--drivers/input/touchscreen/ads7846.c67
-rw-r--r--drivers/input/touchscreen/apple_z2.c477
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c43
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c4
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c2
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c2
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c7
-rw-r--r--drivers/input/touchscreen/chipone_icn8505.c3
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c10
-rw-r--r--drivers/input/touchscreen/cy8ctma140.c4
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2174
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.h448
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c72
-rw-r--r--drivers/input/touchscreen/cyttsp4_spi.c187
-rw-r--r--drivers/input/touchscreen/cyttsp5.c11
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c40
-rw-r--r--drivers/input/touchscreen/cyttsp_core.h5
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c57
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c_common.c85
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c4
-rw-r--r--drivers/input/touchscreen/dynapro.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c68
-rw-r--r--drivers/input/touchscreen/eeti_ts.c4
-rw-r--r--drivers/input/touchscreen/egalax_ts.c5
-rw-r--r--drivers/input/touchscreen/egalax_ts_serial.c2
-rw-r--r--drivers/input/touchscreen/ektf2127.c36
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/elo.c10
-rw-r--r--drivers/input/touchscreen/exc3000.c13
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c3
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c2
-rw-r--r--drivers/input/touchscreen/goodix.c82
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/input/touchscreen/goodix_berlin.h17
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c93
-rw-r--r--drivers/input/touchscreen/goodix_berlin_i2c.c15
-rw-r--r--drivers/input/touchscreen/goodix_berlin_spi.c69
-rw-r--r--drivers/input/touchscreen/gunze.c2
-rw-r--r--drivers/input/touchscreen/hampshire.c2
-rw-r--r--drivers/input/touchscreen/hideep.c4
-rw-r--r--drivers/input/touchscreen/himax_hx83112b.c125
-rw-r--r--drivers/input/touchscreen/himax_hx852x.c503
-rw-r--r--drivers/input/touchscreen/hycon-hy46xx.c2
-rw-r--r--drivers/input/touchscreen/hynitron-cst816x.c253
-rw-r--r--drivers/input/touchscreen/hynitron_cstxxx.c4
-rw-r--r--drivers/input/touchscreen/ili210x.c135
-rw-r--r--drivers/input/touchscreen/ilitek_ts_i2c.c25
-rw-r--r--drivers/input/touchscreen/imagis.c44
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c121
-rw-r--r--drivers/input/touchscreen/inexio.c2
-rw-r--r--drivers/input/touchscreen/iqs5xx.c2
-rw-r--r--drivers/input/touchscreen/iqs7211.c2
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c2
-rw-r--r--drivers/input/touchscreen/max11801_ts.c2
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c6
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c288
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c8
-rw-r--r--drivers/input/touchscreen/migor_ts.c2
-rw-r--r--drivers/input/touchscreen/mms114.c2
-rw-r--r--drivers/input/touchscreen/mtouch.c2
-rw-r--r--drivers/input/touchscreen/novatek-nvt-ts.c69
-rw-r--r--drivers/input/touchscreen/pcap_ts.c2
-rw-r--r--drivers/input/touchscreen/penmount.c2
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c4
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c6
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c101
-rw-r--r--drivers/input/touchscreen/s6sy761.c6
-rw-r--r--drivers/input/touchscreen/silead.c33
-rw-r--r--drivers/input/touchscreen/sis_i2c.c6
-rw-r--r--drivers/input/touchscreen/st1232.c35
-rw-r--r--drivers/input/touchscreen/stmfts.c4
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c13
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c4
-rw-r--r--drivers/input/touchscreen/sur40.c6
-rw-r--r--drivers/input/touchscreen/surface3_spi.c2
-rw-r--r--drivers/input/touchscreen/sx8654.c4
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c4
-rw-r--r--drivers/input/touchscreen/touchit213.c2
-rw-r--r--drivers/input/touchscreen/touchright.c2
-rw-r--r--drivers/input/touchscreen/touchwin.c2
-rw-r--r--drivers/input/touchscreen/ts4800-ts.c5
-rw-r--r--drivers/input/touchscreen/tsc2004.c8
-rw-r--r--drivers/input/touchscreen/tsc2005.c6
-rw-r--r--drivers/input/touchscreen/tsc2007.h2
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c46
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c252
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h1
-rw-r--r--drivers/input/touchscreen/tsc40.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c971
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c6
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c37
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c6
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c2
-rw-r--r--drivers/input/touchscreen/wm9705.c1
-rw-r--r--drivers/input/touchscreen/wm9712.c1
-rw-r--r--drivers/input/touchscreen/wm9713.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c7
-rw-r--r--drivers/input/touchscreen/zet6223.c6
-rw-r--r--drivers/input/touchscreen/zforce_ts.c476
-rw-r--r--drivers/input/touchscreen/zinitix.c144
-rw-r--r--drivers/input/vivaldi-fmap.c1
-rw-r--r--drivers/interconnect/Kconfig1
-rw-r--r--drivers/interconnect/Makefile1
-rw-r--r--drivers/interconnect/core.c112
-rw-r--r--drivers/interconnect/debugfs-client.c7
-rw-r--r--drivers/interconnect/icc-clk.c39
-rw-r--r--drivers/interconnect/imx/imx.c1
-rw-r--r--drivers/interconnect/imx/imx8mm.c3
-rw-r--r--drivers/interconnect/imx/imx8mn.c3
-rw-r--r--drivers/interconnect/imx/imx8mp.c3
-rw-r--r--drivers/interconnect/imx/imx8mq.c3
-rw-r--r--drivers/interconnect/mediatek/Kconfig29
-rw-r--r--drivers/interconnect/mediatek/Makefile5
-rw-r--r--drivers/interconnect/mediatek/icc-emi.c153
-rw-r--r--drivers/interconnect/mediatek/icc-emi.h40
-rw-r--r--drivers/interconnect/mediatek/mt8183.c143
-rw-r--r--drivers/interconnect/mediatek/mt8195.c339
-rw-r--r--drivers/interconnect/qcom/Kconfig92
-rw-r--r--drivers/interconnect/qcom/Makefile20
-rw-r--r--drivers/interconnect/qcom/glymur.c2522
-rw-r--r--drivers/interconnect/qcom/icc-common.c1
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c116
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h44
-rw-r--r--drivers/interconnect/qcom/kaanapali.c1855
-rw-r--r--drivers/interconnect/qcom/milos.c1919
-rw-r--r--drivers/interconnect/qcom/msm8909.c2
-rw-r--r--drivers/interconnect/qcom/msm8916.c2
-rw-r--r--drivers/interconnect/qcom/msm8937.c1350
-rw-r--r--drivers/interconnect/qcom/msm8939.c2
-rw-r--r--drivers/interconnect/qcom/msm8953.c1323
-rw-r--r--drivers/interconnect/qcom/msm8974.c2
-rw-r--r--drivers/interconnect/qcom/msm8976.c1440
-rw-r--r--drivers/interconnect/qcom/msm8996.c3
-rw-r--r--drivers/interconnect/qcom/osm-l3.c47
-rw-r--r--drivers/interconnect/qcom/qcm2290.c6
-rw-r--r--drivers/interconnect/qcom/qcs404.c129
-rw-r--r--drivers/interconnect/qcom/qcs615.c1472
-rw-r--r--drivers/interconnect/qcom/qcs8300.c2001
-rw-r--r--drivers/interconnect/qcom/qdu1000.c350
-rw-r--r--drivers/interconnect/qcom/qdu1000.h95
-rw-r--r--drivers/interconnect/qcom/sa8775p.c1379
-rw-r--r--drivers/interconnect/qcom/sar2130p.c1758
-rw-r--r--drivers/interconnect/qcom/sc7180.c680
-rw-r--r--drivers/interconnect/qcom/sc7180.h149
-rw-r--r--drivers/interconnect/qcom/sc7280.c896
-rw-r--r--drivers/interconnect/qcom/sc7280.h154
-rw-r--r--drivers/interconnect/qcom/sc8180x.c656
-rw-r--r--drivers/interconnect/qcom/sc8180x.h179
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c828
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h209
-rw-r--r--drivers/interconnect/qcom/sdm660.c2
-rw-r--r--drivers/interconnect/qcom/sdm670.c524
-rw-r--r--drivers/interconnect/qcom/sdm670.h128
-rw-r--r--drivers/interconnect/qcom/sdm845.c768
-rw-r--r--drivers/interconnect/qcom/sdm845.h140
-rw-r--r--drivers/interconnect/qcom/sdx55.c491
-rw-r--r--drivers/interconnect/qcom/sdx55.h70
-rw-r--r--drivers/interconnect/qcom/sdx65.c459
-rw-r--r--drivers/interconnect/qcom/sdx65.h65
-rw-r--r--drivers/interconnect/qcom/sdx75.c397
-rw-r--r--drivers/interconnect/qcom/sdx75.h97
-rw-r--r--drivers/interconnect/qcom/sm6115.c35
-rw-r--r--drivers/interconnect/qcom/sm6350.c929
-rw-r--r--drivers/interconnect/qcom/sm6350.h139
-rw-r--r--drivers/interconnect/qcom/sm7150.c655
-rw-r--r--drivers/interconnect/qcom/sm7150.h140
-rw-r--r--drivers/interconnect/qcom/sm8150.c708
-rw-r--r--drivers/interconnect/qcom/sm8150.h152
-rw-r--r--drivers/interconnect/qcom/sm8250.c738
-rw-r--r--drivers/interconnect/qcom/sm8250.h168
-rw-r--r--drivers/interconnect/qcom/sm8350.c841
-rw-r--r--drivers/interconnect/qcom/sm8350.h168
-rw-r--r--drivers/interconnect/qcom/sm8450.c603
-rw-r--r--drivers/interconnect/qcom/sm8450.h169
-rw-r--r--drivers/interconnect/qcom/sm8550.c503
-rw-r--r--drivers/interconnect/qcom/sm8550.h138
-rw-r--r--drivers/interconnect/qcom/sm8650.c869
-rw-r--r--drivers/interconnect/qcom/sm8650.h143
-rw-r--r--drivers/interconnect/qcom/sm8750.c1535
-rw-r--r--drivers/interconnect/qcom/smd-rpm.c2
-rw-r--r--drivers/interconnect/qcom/x1e80100.c612
-rw-r--r--drivers/interconnect/qcom/x1e80100.h192
-rw-r--r--drivers/interconnect/samsung/exynos.c7
-rw-r--r--drivers/interconnect/trace.h10
-rw-r--r--drivers/iommu/Kconfig149
-rw-r--r--drivers/iommu/Makefile8
-rw-r--r--drivers/iommu/amd/Kconfig6
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h52
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h264
-rw-r--r--drivers/iommu/amd/debugfs.c378
-rw-r--r--drivers/iommu/amd/init.c895
-rw-r--r--drivers/iommu/amd/io_pgtable.c607
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c387
-rw-r--r--drivers/iommu/amd/iommu.c1811
-rw-r--r--drivers/iommu/amd/pasid.c11
-rw-r--r--drivers/iommu/amd/ppr.c27
-rw-r--r--drivers/iommu/apple-dart.c94
-rw-r--r--drivers/iommu/arm/Kconfig144
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/Makefile7
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c485
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c542
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c172
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c1868
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h393
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c1349
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c4
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c94
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c253
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h5
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c163
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h73
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c35
-rw-r--r--drivers/iommu/dma-iommu.c960
-rw-r--r--drivers/iommu/dma-iommu.h14
-rw-r--r--drivers/iommu/exynos-iommu.c47
-rw-r--r--drivers/iommu/fsl_pamu_domain.c18
-rw-r--r--drivers/iommu/generic_pt/.kunitconfig14
-rw-r--r--drivers/iommu/generic_pt/Kconfig79
-rw-r--r--drivers/iommu/generic_pt/fmt/Makefile28
-rw-r--r--drivers/iommu/generic_pt/fmt/amdv1.h411
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_amdv1.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_vtdss.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_x86_64.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_amdv1.c15
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_mock.c10
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_template.h48
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_vtdss.c10
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_x86_64.c11
-rw-r--r--drivers/iommu/generic_pt/fmt/vtdss.h285
-rw-r--r--drivers/iommu/generic_pt/fmt/x86_64.h279
-rw-r--r--drivers/iommu/generic_pt/iommu_pt.h1289
-rw-r--r--drivers/iommu/generic_pt/kunit_generic_pt.h823
-rw-r--r--drivers/iommu/generic_pt/kunit_iommu.h184
-rw-r--r--drivers/iommu/generic_pt/kunit_iommu_pt.h487
-rw-r--r--drivers/iommu/generic_pt/pt_common.h389
-rw-r--r--drivers/iommu/generic_pt/pt_defs.h332
-rw-r--r--drivers/iommu/generic_pt/pt_fmt_defaults.h295
-rw-r--r--drivers/iommu/generic_pt/pt_iter.h636
-rw-r--r--drivers/iommu/generic_pt/pt_log2.h122
-rw-r--r--drivers/iommu/hyperv-iommu.c41
-rw-r--r--drivers/iommu/intel/Kconfig9
-rw-r--r--drivers/iommu/intel/Makefile7
-rw-r--r--drivers/iommu/intel/cache.c285
-rw-r--r--drivers/iommu/intel/cap_audit.c217
-rw-r--r--drivers/iommu/intel/cap_audit.h131
-rw-r--r--drivers/iommu/intel/debugfs.c29
-rw-r--r--drivers/iommu/intel/dmar.c144
-rw-r--r--drivers/iommu/intel/iommu.c2513
-rw-r--r--drivers/iommu/intel/iommu.h407
-rw-r--r--drivers/iommu/intel/irq_remapping.c166
-rw-r--r--drivers/iommu/intel/nested.c96
-rw-r--r--drivers/iommu/intel/pasid.c558
-rw-r--r--drivers/iommu/intel/pasid.h45
-rw-r--r--drivers/iommu/intel/perf.c10
-rw-r--r--drivers/iommu/intel/perf.h5
-rw-r--r--drivers/iommu/intel/perfmon.c111
-rw-r--r--drivers/iommu/intel/prq.c396
-rw-r--r--drivers/iommu/intel/svm.c480
-rw-r--r--drivers/iommu/intel/trace.h19
-rw-r--r--drivers/iommu/io-pgfault.c122
-rw-r--r--drivers/iommu/io-pgtable-arm-selftests.c214
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c152
-rw-r--r--drivers/iommu/io-pgtable-arm.c627
-rw-r--r--drivers/iommu/io-pgtable-dart.c159
-rw-r--r--drivers/iommu/io-pgtable.c4
-rw-r--r--drivers/iommu/iommu-pages.c253
-rw-r--r--drivers/iommu/iommu-pages.h224
-rw-r--r--drivers/iommu/iommu-priv.h41
-rw-r--r--drivers/iommu/iommu-sva.c96
-rw-r--r--drivers/iommu/iommu-sysfs.c2
-rw-r--r--drivers/iommu/iommu.c1055
-rw-r--r--drivers/iommu/iommufd/Kconfig5
-rw-r--r--drivers/iommu/iommufd/Makefile7
-rw-r--r--drivers/iommu/iommufd/device.c713
-rw-r--r--drivers/iommu/iommufd/driver.c304
-rw-r--r--drivers/iommu/iommufd/eventq.c541
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c203
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c248
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h83
-rw-r--r--drivers/iommu/iommufd/ioas.c267
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h383
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h123
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c138
-rw-r--r--drivers/iommu/iommufd/main.c322
-rw-r--r--drivers/iommu/iommufd/pages.c721
-rw-r--r--drivers/iommu/iommufd/selftest.c1447
-rw-r--r--drivers/iommu/iommufd/vfio_compat.c13
-rw-r--r--drivers/iommu/iommufd/viommu.c430
-rw-r--r--drivers/iommu/iova.c9
-rw-r--r--drivers/iommu/ipmmu-vmsa.c49
-rw-r--r--drivers/iommu/msm_iommu.c69
-rw-r--r--drivers/iommu/mtk_iommu.c255
-rw-r--r--drivers/iommu/mtk_iommu_v1.c91
-rw-r--r--drivers/iommu/of_iommu.c68
-rw-r--r--drivers/iommu/omap-iommu.c76
-rw-r--r--drivers/iommu/omap-iommu.h2
-rw-r--r--drivers/iommu/riscv/Kconfig20
-rw-r--r--drivers/iommu/riscv/Makefile3
-rw-r--r--drivers/iommu/riscv/iommu-bits.h784
-rw-r--r--drivers/iommu/riscv/iommu-pci.c128
-rw-r--r--drivers/iommu/riscv/iommu-platform.c179
-rw-r--r--drivers/iommu/riscv/iommu.c1682
-rw-r--r--drivers/iommu/riscv/iommu.h89
-rw-r--r--drivers/iommu/rockchip-iommu.c104
-rw-r--r--drivers/iommu/s390-iommu.c570
-rw-r--r--drivers/iommu/sprd-iommu.c10
-rw-r--r--drivers/iommu/sun50i-iommu.c30
-rw-r--r--drivers/iommu/tegra-smmu.c132
-rw-r--r--drivers/iommu/virtio-iommu.c220
-rw-r--r--drivers/ipack/ipack.c6
-rw-r--r--drivers/irqchip/Kconfig144
-rw-r--r--drivers/irqchip/Makefile19
-rw-r--r--drivers/irqchip/exynos-combiner.c16
-rw-r--r--drivers/irqchip/irq-aclint-sswi.c209
-rw-r--r--drivers/irqchip/irq-al-fic.c20
-rw-r--r--drivers/irqchip/irq-alpine-msi.c156
-rw-r--r--drivers/irqchip/irq-apple-aic.c141
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c888
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-intc.c139
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c258
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c22
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c21
-rw-r--r--drivers/irqchip/irq-atmel-aic.c22
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c40
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c288
-rw-r--r--drivers/irqchip/irq-bcm2835.c6
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c31
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c55
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c63
-rw-r--r--drivers/irqchip/irq-clps711x.c8
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c5
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c62
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c5
-rw-r--r--drivers/irqchip/irq-econet-en751221.c310
-rw-r--r--drivers/irqchip/irq-ftintc010.c7
-rw-r--r--drivers/irqchip/irq-gic-common.c22
-rw-r--r--drivers/irqchip/irq-gic-common.h8
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.c331
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.h12
-rw-r--r--drivers/irqchip/irq-gic-v2m.c118
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c202
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c163
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c450
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c153
-rw-r--r--drivers/irqchip/irq-gic-v3.c607
-rw-r--r--drivers/irqchip/irq-gic-v4.c6
-rw-r--r--drivers/irqchip/irq-gic-v5-irs.c827
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c1233
-rw-r--r--drivers/irqchip/irq-gic-v5-iwb.c277
-rw-r--r--drivers/irqchip/irq-gic-v5.c1130
-rw-r--r--drivers/irqchip/irq-gic.c19
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c7
-rw-r--r--drivers/irqchip/irq-hip04.c12
-rw-r--r--drivers/irqchip/irq-i8259.c16
-rw-r--r--drivers/irqchip/irq-idt3243x.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c6
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c22
-rw-r--r--drivers/irqchip/irq-imx-intmux.c4
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c44
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c89
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c13
-rw-r--r--drivers/irqchip/irq-ingenic.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c5
-rw-r--r--drivers/irqchip/irq-jcore-aic.c7
-rw-r--r--drivers/irqchip/irq-keystone.c17
-rw-r--r--drivers/irqchip/irq-lan966x-oic.c274
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c433
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c15
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c241
-rw-r--r--drivers/irqchip/irq-loongson-htpic.c10
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c16
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c17
-rw-r--r--drivers/irqchip/irq-loongson-pch-lpc.c23
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c105
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c16
-rw-r--r--drivers/irqchip/irq-loongson.h27
-rw-r--r--drivers/irqchip/irq-lpc32xx.c4
-rw-r--r--drivers/irqchip/irq-ls-extirq.c4
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c57
-rw-r--r--drivers/irqchip/irq-ls1x.c4
-rw-r--r--drivers/irqchip/irq-madera.c2
-rw-r--r--drivers/irqchip/irq-mbigen.c138
-rw-r--r--drivers/irqchip/irq-mchp-eic.c22
-rw-r--r--drivers/irqchip/irq-meson-gpio.c82
-rw-r--r--drivers/irqchip/irq-mips-cpu.c13
-rw-r--r--drivers/irqchip/irq-mips-gic.c290
-rw-r--r--drivers/irqchip/irq-mmp.c12
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c17
-rw-r--r--drivers/irqchip/irq-msi-lib.c166
-rw-r--r--drivers/irqchip/irq-mst-intc.c16
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c17
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c67
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c276
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c55
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c11
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c71
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-nvic.c5
-rw-r--r--drivers/irqchip/irq-omap-intc.c7
-rw-r--r--drivers/irqchip/irq-or1k-pic.c4
-rw-r--r--drivers/irqchip/irq-orion.c6
-rw-r--r--drivers/irqchip/irq-owl-sirq.c4
-rw-r--r--drivers/irqchip/irq-partition-percpu.c241
-rw-r--r--drivers/irqchip/irq-pic32-evic.c18
-rw-r--r--drivers/irqchip/irq-pruss-intc.c9
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c11
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c12
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c12
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c10
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c225
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c622
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c32
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.c77
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.h1
-rw-r--r--drivers/irqchip/irq-riscv-aplic-msi.c44
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c111
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c259
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c330
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h19
-rw-r--r--drivers/irqchip/irq-riscv-intc.c125
-rw-r--r--drivers/irqchip/irq-riscv-rpmi-sysmsi.c328
-rw-r--r--drivers/irqchip/irq-sa11x0.c17
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c340
-rw-r--r--drivers/irqchip/irq-sifive-plic.c441
-rw-r--r--drivers/irqchip/irq-sni-exiu.c6
-rw-r--r--drivers/irqchip/irq-sp7021-intc.c4
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c10
-rw-r--r--drivers/irqchip/irq-stm32-exti.c693
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c725
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sun6i-r.c24
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c97
-rw-r--r--drivers/irqchip/irq-tb10x.c21
-rw-r--r--drivers/irqchip/irq-tegra.c17
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c10
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c7
-rw-r--r--drivers/irqchip/irq-ts4800.c8
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c8
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c6
-rw-r--r--drivers/irqchip/irq-vic.c17
-rw-r--r--drivers/irqchip/irq-vt8500.c153
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c8
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c7
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c8
-rw-r--r--drivers/irqchip/irq-zevio.c4
-rw-r--r--drivers/irqchip/irqchip.c14
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c6
-rw-r--r--drivers/irqchip/qcom-pdc.c72
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/capi/capi.c9
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c30
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c29
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c19
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c13
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c9
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c1
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c1
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c11
-rw-r--r--drivers/isdn/mISDN/core.c15
-rw-r--r--drivers/isdn/mISDN/core.h1
-rw-r--r--drivers/isdn/mISDN/dsp_blowfish.c5
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c7
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.c6
-rw-r--r--drivers/isdn/mISDN/dsp_tones.c6
-rw-r--r--drivers/isdn/mISDN/fsm.c6
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c9
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/isdn/mISDN/timerdev.c3
-rw-r--r--drivers/leds/.kunitconfig4
-rw-r--r--drivers/leds/Kconfig121
-rw-r--r--drivers/leds/Makefile12
-rw-r--r--drivers/leds/blink/leds-bcm63138.c30
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c6
-rw-r--r--drivers/leds/flash/Kconfig23
-rw-r--r--drivers/leds/flash/Makefile2
-rw-r--r--drivers/leds/flash/leds-aat1290.c18
-rw-r--r--drivers/leds/flash/leds-as3645a.c12
-rw-r--r--drivers/leds/flash/leds-ktd2692.c20
-rw-r--r--drivers/leds/flash/leds-lm3601x.c19
-rw-r--r--drivers/leds/flash/leds-max77693.c22
-rw-r--r--drivers/leds/flash/leds-mt6360.c22
-rw-r--r--drivers/leds/flash/leds-mt6370-flash.c11
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c269
-rw-r--r--drivers/leds/flash/leds-rt4505.c3
-rw-r--r--drivers/leds/flash/leds-rt8515.c10
-rw-r--r--drivers/leds/flash/leds-sgm3140.c13
-rw-r--r--drivers/leds/flash/leds-sy7802.c539
-rw-r--r--drivers/leds/flash/leds-tps6131x.c815
-rw-r--r--drivers/leds/led-class-flash.c16
-rw-r--r--drivers/leds/led-class-multicolor.c8
-rw-r--r--drivers/leds/led-class.c66
-rw-r--r--drivers/leds/led-core.c140
-rw-r--r--drivers/leds/led-test.c132
-rw-r--r--drivers/leds/led-triggers.c58
-rw-r--r--drivers/leds/leds-88pm860x.c7
-rw-r--r--drivers/leds/leds-adp5520.c2
-rw-r--r--drivers/leds/leds-an30259a.c18
-rw-r--r--drivers/leds/leds-apu.c3
-rw-r--r--drivers/leds/leds-aw200xx.c41
-rw-r--r--drivers/leds/leds-aw2013.c33
-rw-r--r--drivers/leds/leds-bcm6328.c11
-rw-r--r--drivers/leds/leds-bcm6358.c7
-rw-r--r--drivers/leds/leds-bd2606mvv.c23
-rw-r--r--drivers/leds/leds-bd2802.c2
-rw-r--r--drivers/leds/leds-blinkm.c222
-rw-r--r--drivers/leds/leds-cht-wcove.c8
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-cr0014114.c4
-rw-r--r--drivers/leds/leds-cros_ec.c263
-rw-r--r--drivers/leds/leds-da903x.c2
-rw-r--r--drivers/leds/leds-da9052.c2
-rw-r--r--drivers/leds/leds-el15203000.c14
-rw-r--r--drivers/leds/leds-expresswire.c12
-rw-r--r--drivers/leds/leds-gpio-register.c2
-rw-r--r--drivers/leds/leds-gpio.c29
-rw-r--r--drivers/leds/leds-is31fl319x.c46
-rw-r--r--drivers/leds/leds-is31fl32xx.c61
-rw-r--r--drivers/leds/leds-lm3530.c2
-rw-r--r--drivers/leds/leds-lm3532.c49
-rw-r--r--drivers/leds/leds-lm3533.c2
-rw-r--r--drivers/leds/leds-lm3642.c2
-rw-r--r--drivers/leds/leds-lm3697.c20
-rw-r--r--drivers/leds/leds-lp3944.c2
-rw-r--r--drivers/leds/leds-lp3952.c23
-rw-r--r--drivers/leds/leds-lp50xx.c106
-rw-r--r--drivers/leds/leds-lp5521.c410
-rw-r--r--drivers/leds/leds-lp5523.c763
-rw-r--r--drivers/leds/leds-lp5562.c299
-rw-r--r--drivers/leds/leds-lp5569.c544
-rw-r--r--drivers/leds/leds-lp55xx-common.c779
-rw-r--r--drivers/leds/leds-lp55xx-common.h164
-rw-r--r--drivers/leds/leds-lp8501.c313
-rw-r--r--drivers/leds/leds-lp8860.c222
-rw-r--r--drivers/leds/leds-lp8864.c296
-rw-r--r--drivers/leds/leds-max5970.c7
-rw-r--r--drivers/leds/leds-max77650.c18
-rw-r--r--drivers/leds/leds-max77705.c275
-rw-r--r--drivers/leds/leds-mc13783.c26
-rw-r--r--drivers/leds/leds-mlxcpld.c1
-rw-r--r--drivers/leds/leds-mlxreg.c14
-rw-r--r--drivers/leds/leds-mt6323.c24
-rw-r--r--drivers/leds/leds-netxbig.c56
-rw-r--r--drivers/leds/leds-nic78bx.c39
-rw-r--r--drivers/leds/leds-ns2.c7
-rw-r--r--drivers/leds/leds-pca9532.c96
-rw-r--r--drivers/leds/leds-pca955x.c379
-rw-r--r--drivers/leds/leds-pca963x.c11
-rw-r--r--drivers/leds/leds-pca995x.c78
-rw-r--r--drivers/leds/leds-powernv.c32
-rw-r--r--drivers/leds/leds-pwm.c68
-rw-r--r--drivers/leds/leds-qnap-mcu.c392
-rw-r--r--drivers/leds/leds-rb532.c2
-rw-r--r--drivers/leds/leds-regulator.c2
-rw-r--r--drivers/leds/leds-sc27xx-bltc.c14
-rw-r--r--drivers/leds/leds-spi-byte.c63
-rw-r--r--drivers/leds/leds-ss4200.c9
-rw-r--r--drivers/leds/leds-st1202.c416
-rw-r--r--drivers/leds/leds-sun50i-a100.c45
-rw-r--r--drivers/leds/leds-sunfire.c4
-rw-r--r--drivers/leds/leds-tca6507.c16
-rw-r--r--drivers/leds/leds-tlc591xx.c18
-rw-r--r--drivers/leds/leds-turris-omnia.c352
-rw-r--r--drivers/leds/leds-upboard.c126
-rw-r--r--drivers/leds/leds-wm831x-status.c2
-rw-r--r--drivers/leds/leds-wm8350.c2
-rw-r--r--drivers/leds/leds.h5
-rw-r--r--drivers/leds/rgb/Kconfig1
-rw-r--r--drivers/leds/rgb/leds-group-multicolor.c2
-rw-r--r--drivers/leds/rgb/leds-ktd202x.c82
-rw-r--r--drivers/leds/rgb/leds-mt6370-rgb.c58
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c23
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c20
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c61
-rw-r--r--drivers/leds/simatic/Kconfig (renamed from drivers/leds/simple/Kconfig)0
-rw-r--r--drivers/leds/simatic/Makefile (renamed from drivers/leds/simple/Makefile)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c)3
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-core.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-core.c)2
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-elkhartlake.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c)3
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c107
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio.h (renamed from drivers/leds/simple/simatic-ipc-leds-gpio.h)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds.c (renamed from drivers/leds/simple/simatic-ipc-leds.c)1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c66
-rw-r--r--drivers/leds/trigger/Kconfig23
-rw-r--r--drivers/leds/trigger/Makefile2
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c6
-rw-r--r--drivers/leds/trigger/ledtrig-audio.c67
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c48
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c14
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c2
-rw-r--r--drivers/leds/trigger/ledtrig-input-events.c165
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c28
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c126
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c5
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c4
-rw-r--r--drivers/leds/uleds.c1
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/adb-iop.c2
-rw-r--r--drivers/macintosh/adb.c1
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/ams/ams-i2c.c2
-rw-r--r--drivers/macintosh/mac_hid.c8
-rw-r--r--drivers/macintosh/macio_asic.c6
-rw-r--r--drivers/macintosh/smu.c7
-rw-r--r--drivers/macintosh/therm_windtunnel.c4
-rw-r--r--drivers/macintosh/via-pmu-backlight.c4
-rw-r--r--drivers/macintosh/via-pmu-led.c19
-rw-r--r--drivers/macintosh/via-pmu.c14
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c2
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_pm112.c2
-rw-r--r--drivers/macintosh/windfarm_pm121.c2
-rw-r--r--drivers/macintosh/windfarm_pm72.c2
-rw-r--r--drivers/macintosh/windfarm_pm81.c2
-rw-r--r--drivers/macintosh/windfarm_pm91.c2
-rw-r--r--drivers/macintosh/windfarm_rm31.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c2
-rw-r--r--drivers/mailbox/Kconfig129
-rw-r--r--drivers/mailbox/Makefile22
-rw-r--r--drivers/mailbox/arm_mhu.c2
-rw-r--r--drivers/mailbox/arm_mhu_db.c2
-rw-r--r--drivers/mailbox/arm_mhuv2.c10
-rw-r--r--drivers/mailbox/arm_mhuv3.c1103
-rw-r--r--drivers/mailbox/ast2700-mailbox.c235
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c27
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c3
-rw-r--r--drivers/mailbox/bcm74110-mailbox.c656
-rw-r--r--drivers/mailbox/cix-mailbox.c645
-rw-r--r--drivers/mailbox/cv1800-mailbox.c220
-rw-r--r--drivers/mailbox/exynos-mailbox.c157
-rw-r--r--drivers/mailbox/imx-mailbox.c43
-rw-r--r--drivers/mailbox/mailbox-altera.c4
-rw-r--r--drivers/mailbox/mailbox-mchp-ipc-sbi.c504
-rw-r--r--drivers/mailbox/mailbox-mpfs.c81
-rw-r--r--drivers/mailbox/mailbox-test.c6
-rw-r--r--drivers/mailbox/mailbox-th1520.c597
-rw-r--r--drivers/mailbox/mailbox.c285
-rw-r--r--drivers/mailbox/mailbox.h2
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c205
-rw-r--r--drivers/mailbox/mtk-gpueb-mailbox.c319
-rw-r--r--drivers/mailbox/omap-mailbox.c558
-rw-r--r--drivers/mailbox/pcc.c196
-rw-r--r--drivers/mailbox/pl320-ipc.c14
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c20
-rw-r--r--drivers/mailbox/qcom-cpucp-mbox.c187
-rw-r--r--drivers/mailbox/qcom-ipcc.c21
-rw-r--r--drivers/mailbox/riscv-sbi-mpxy-mbox.c1019
-rw-r--r--drivers/mailbox/rockchip-mailbox.c2
-rw-r--r--drivers/mailbox/sprd-mailbox.c25
-rw-r--r--drivers/mailbox/stm32-ipcc.c2
-rw-r--r--drivers/mailbox/sun6i-msgbox.c4
-rw-r--r--drivers/mailbox/tegra-hsp.c80
-rw-r--r--drivers/mailbox/ti-msgmgr.c2
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c432
-rw-r--r--drivers/mcb/mcb-core.c40
-rw-r--r--drivers/mcb/mcb-lpc.c6
-rw-r--r--drivers/mcb/mcb-parse.c9
-rw-r--r--drivers/mcb/mcb-pci.c18
-rw-r--r--drivers/md/Kconfig57
-rw-r--r--drivers/md/Makefile7
-rw-r--r--drivers/md/bcache/alloc.c46
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.h8
-rw-r--r--drivers/md/bcache/btree.c65
-rw-r--r--drivers/md/bcache/debug.c3
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c93
-rw-r--r--drivers/md/bcache/journal.h13
-rw-r--r--drivers/md/bcache/movinggc.c10
-rw-r--r--drivers/md/bcache/request.c16
-rw-r--r--drivers/md/bcache/stats.c4
-rw-r--r--drivers/md/bcache/super.c137
-rw-r--r--drivers/md/bcache/sysfs.c15
-rw-r--r--drivers/md/bcache/util.c2
-rw-r--r--drivers/md/bcache/writeback.c15
-rw-r--r--drivers/md/dm-bio-prison-v1.c35
-rw-r--r--drivers/md/dm-bio-prison-v1.h24
-rw-r--r--drivers/md/dm-bufio.c260
-rw-r--r--drivers/md/dm-cache-background-tracker.c31
-rw-r--r--drivers/md/dm-cache-background-tracker.h9
-rw-r--r--drivers/md/dm-cache-metadata.c48
-rw-r--r--drivers/md/dm-cache-metadata.h3
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-cache-target.c200
-rw-r--r--drivers/md/dm-clone-metadata.c11
-rw-r--r--drivers/md/dm-clone-target.c13
-rw-r--r--drivers/md/dm-core.h9
-rw-r--r--drivers/md/dm-crypt.c249
-rw-r--r--drivers/md/dm-delay.c37
-rw-r--r--drivers/md/dm-dust.c4
-rw-r--r--drivers/md/dm-ebs-target.c14
-rw-r--r--drivers/md/dm-era-target.c10
-rw-r--r--drivers/md/dm-flakey.c123
-rw-r--r--drivers/md/dm-ima.c110
-rw-r--r--drivers/md/dm-init.c4
-rw-r--r--drivers/md/dm-integrity.c1047
-rw-r--r--drivers/md/dm-io.c85
-rw-r--r--drivers/md/dm-ioctl.c27
-rw-r--r--drivers/md/dm-linear.c12
-rw-r--r--drivers/md/dm-log-writes.c10
-rw-r--r--drivers/md/dm-mpath.c258
-rw-r--r--drivers/md/dm-path-selector.c8
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-pcache/Kconfig17
-rw-r--r--drivers/md/dm-pcache/Makefile3
-rw-r--r--drivers/md/dm-pcache/backing_dev.c374
-rw-r--r--drivers/md/dm-pcache/backing_dev.h127
-rw-r--r--drivers/md/dm-pcache/cache.c445
-rw-r--r--drivers/md/dm-pcache/cache.h635
-rw-r--r--drivers/md/dm-pcache/cache_dev.c303
-rw-r--r--drivers/md/dm-pcache/cache_dev.h70
-rw-r--r--drivers/md/dm-pcache/cache_gc.c170
-rw-r--r--drivers/md/dm-pcache/cache_key.c888
-rw-r--r--drivers/md/dm-pcache/cache_req.c836
-rw-r--r--drivers/md/dm-pcache/cache_segment.c305
-rw-r--r--drivers/md/dm-pcache/cache_writeback.c261
-rw-r--r--drivers/md/dm-pcache/dm_pcache.c497
-rw-r--r--drivers/md/dm-pcache/dm_pcache.h67
-rw-r--r--drivers/md/dm-pcache/pcache_internal.h117
-rw-r--r--drivers/md/dm-pcache/segment.c61
-rw-r--r--drivers/md/dm-pcache/segment.h74
-rw-r--r--drivers/md/dm-ps-historical-service-time.c9
-rw-r--r--drivers/md/dm-ps-io-affinity.c7
-rw-r--r--drivers/md/dm-ps-queue-length.c9
-rw-r--r--drivers/md/dm-ps-round-robin.c9
-rw-r--r--drivers/md/dm-ps-service-time.c9
-rw-r--r--drivers/md/dm-raid.c159
-rw-r--r--drivers/md/dm-raid1.c14
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-rq.c10
-rw-r--r--drivers/md/dm-snap.c2
-rw-r--r--drivers/md/dm-stripe.c24
-rw-r--r--drivers/md/dm-switch.c8
-rw-r--r--drivers/md/dm-table.c707
-rw-r--r--drivers/md/dm-target.c6
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-thin.c31
-rw-r--r--drivers/md/dm-unstripe.c4
-rw-r--r--drivers/md/dm-vdo/Kconfig1
-rw-r--r--drivers/md/dm-vdo/block-map.c15
-rw-r--r--drivers/md/dm-vdo/constants.h3
-rw-r--r--drivers/md/dm-vdo/data-vio.c68
-rw-r--r--drivers/md/dm-vdo/data-vio.h5
-rw-r--r--drivers/md/dm-vdo/dedupe.c42
-rw-r--r--drivers/md/dm-vdo/dm-vdo-target.c39
-rw-r--r--drivers/md/dm-vdo/encodings.c22
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.c3
-rw-r--r--drivers/md/dm-vdo/indexer/chapter-index.c2
-rw-r--r--drivers/md/dm-vdo/indexer/index-layout.c31
-rw-r--r--drivers/md/dm-vdo/indexer/index-session.c6
-rw-r--r--drivers/md/dm-vdo/indexer/index.c5
-rw-r--r--drivers/md/dm-vdo/indexer/indexer.h57
-rw-r--r--drivers/md/dm-vdo/indexer/io-factory.c2
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.c4
-rw-r--r--drivers/md/dm-vdo/indexer/volume.c24
-rw-r--r--drivers/md/dm-vdo/int-map.c30
-rw-r--r--drivers/md/dm-vdo/io-submitter.c9
-rw-r--r--drivers/md/dm-vdo/io-submitter.h18
-rw-r--r--drivers/md/dm-vdo/logger.c2
-rw-r--r--drivers/md/dm-vdo/message-stats.c48
-rw-r--r--drivers/md/dm-vdo/message-stats.h1
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c9
-rw-r--r--drivers/md/dm-vdo/numeric.h2
-rw-r--r--drivers/md/dm-vdo/packer.c3
-rw-r--r--drivers/md/dm-vdo/packer.h2
-rw-r--r--drivers/md/dm-vdo/physical-zone.c2
-rw-r--r--drivers/md/dm-vdo/priority-table.c2
-rw-r--r--drivers/md/dm-vdo/recovery-journal.c2
-rw-r--r--drivers/md/dm-vdo/recovery-journal.h6
-rw-r--r--drivers/md/dm-vdo/repair.c70
-rw-r--r--drivers/md/dm-vdo/slab-depot.c224
-rw-r--r--drivers/md/dm-vdo/slab-depot.h13
-rw-r--r--drivers/md/dm-vdo/status-codes.c2
-rw-r--r--drivers/md/dm-vdo/status-codes.h2
-rw-r--r--drivers/md/dm-vdo/types.h3
-rw-r--r--drivers/md/dm-vdo/vdo.c15
-rw-r--r--drivers/md/dm-vdo/vio.c57
-rw-r--r--drivers/md/dm-vdo/vio.h13
-rw-r--r--drivers/md/dm-vdo/wait-queue.c2
-rw-r--r--drivers/md/dm-verity-fec.c107
-rw-r--r--drivers/md/dm-verity-fec.h6
-rw-r--r--drivers/md/dm-verity-target.c805
-rw-r--r--drivers/md/dm-verity-verify-sig.c24
-rw-r--r--drivers/md/dm-verity.h46
-rw-r--r--drivers/md/dm-writecache.c21
-rw-r--r--drivers/md/dm-zero.c1
-rw-r--r--drivers/md/dm-zone.c419
-rw-r--r--drivers/md/dm-zoned-metadata.c50
-rw-r--r--drivers/md/dm-zoned-reclaim.c6
-rw-r--r--drivers/md/dm-zoned-target.c12
-rw-r--r--drivers/md/dm-zoned.h2
-rw-r--r--drivers/md/dm.c525
-rw-r--r--drivers/md/dm.h23
-rw-r--r--drivers/md/md-autodetect.c8
-rw-r--r--drivers/md/md-bitmap.c738
-rw-r--r--drivers/md/md-bitmap.h341
-rw-r--r--drivers/md/md-cluster.c176
-rw-r--r--drivers/md/md-cluster.h8
-rw-r--r--drivers/md/md-linear.c350
-rw-r--r--drivers/md/md-llbitmap.c1626
-rw-r--r--drivers/md/md.c2300
-rw-r--r--drivers/md/md.h310
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/dm-array.c25
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c12
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h14
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h2
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c6
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c14
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c4
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c62
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h6
-rw-r--r--drivers/md/raid0.c95
-rw-r--r--drivers/md/raid1-10.c27
-rw-r--r--drivers/md/raid1.c523
-rw-r--r--drivers/md/raid1.h27
-rw-r--r--drivers/md/raid10.c366
-rw-r--r--drivers/md/raid10.h3
-rw-r--r--drivers/md/raid5-cache.c59
-rw-r--r--drivers/md/raid5-ppl.c24
-rw-r--r--drivers/md/raid5.c534
-rw-r--r--drivers/md/raid5.h8
-rw-r--r--drivers/media/cec/core/cec-adap.c58
-rw-r--r--drivers/media/cec/core/cec-api.c7
-rw-r--r--drivers/media/cec/core/cec-core.c39
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c62
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h8
-rw-r--r--drivers/media/cec/core/cec-pin.c48
-rw-r--r--drivers/media/cec/core/cec-priv.h2
-rw-r--r--drivers/media/cec/i2c/Kconfig10
-rw-r--r--drivers/media/cec/i2c/Makefile1
-rw-r--r--drivers/media/cec/i2c/tda9950.c (renamed from drivers/gpu/drm/i2c/tda9950.c)4
-rw-r--r--drivers/media/cec/platform/Kconfig2
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c69
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c7
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c2
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c2
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c2
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.c2
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c3
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.c2
-rw-r--r--drivers/media/cec/usb/Kconfig1
-rw-r--r--drivers/media/cec/usb/Makefile1
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig14
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile2
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c657
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h51
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c1836
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h118
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c6
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c7
-rw-r--r--drivers/media/common/b2c2/flexcop-common.h4
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c2
-rw-r--r--drivers/media/common/b2c2/flexcop-misc.c13
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c2
-rw-r--r--drivers/media/common/b2c2/flexcop.c22
-rw-r--r--drivers/media/common/cx2341x.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c8
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c8
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.c25
-rw-r--r--drivers/media/common/siano/smscoreapi.h28
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c9
-rw-r--r--drivers/media/common/siano/smsdvb-main.c6
-rw-r--r--drivers/media/common/siano/smsendian.c8
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/common/uvc.c9
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c11
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c196
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dvb.c1
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c30
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c2
-rw-r--r--drivers/media/dvb-core/dmxdev.c15
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c28
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c4
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c36
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c8
-rw-r--r--drivers/media/dvb-core/dvbdev.c20
-rw-r--r--drivers/media/dvb-frontends/Kconfig4
-rw-r--r--drivers/media/dvb-frontends/a8293.c2
-rw-r--r--drivers/media/dvb-frontends/af9013.c2
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c3
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c7
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c6
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c11
-rw-r--r--drivers/media/dvb-frontends/dib0090.c4
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c17
-rw-r--r--drivers/media/dvb-frontends/dib8000.c10
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c10
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c9
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c3
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c6
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c1
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c7
-rw-r--r--drivers/media/dvb-frontends/mn88472.c2
-rw-r--r--drivers/media/dvb-frontends/mn88473.c2
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c2
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h4
-rw-r--r--drivers/media/dvb-frontends/mxl692.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c6
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si2168.c2
-rw-r--r--drivers/media/dvb-frontends/sp2.c2
-rw-r--r--drivers/media/dvb-frontends/stb0899_algo.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367_priv.h3
-rw-r--r--drivers/media/dvb-frontends/stv090x.c39
-rw-r--r--drivers/media/dvb-frontends/stv0910.c5
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/stv6111.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c8
-rw-r--r--drivers/media/dvb-frontends/tda10071.c2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c12
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/i2c/Kconfig225
-rw-r--r--drivers/media/i2c/Makefile18
-rw-r--r--drivers/media/i2c/ad5820.c4
-rw-r--r--drivers/media/i2c/adp1653.c2
-rw-r--r--drivers/media/i2c/adv7170.c4
-rw-r--r--drivers/media/i2c/adv7175.c4
-rw-r--r--drivers/media/i2c/adv7180.c385
-rw-r--r--drivers/media/i2c/adv7183.c4
-rw-r--r--drivers/media/i2c/adv7343.c4
-rw-r--r--drivers/media/i2c/adv7393.c4
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c21
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c145
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c10
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h3
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c108
-rw-r--r--drivers/media/i2c/adv7604.c131
-rw-r--r--drivers/media/i2c/adv7842.c152
-rw-r--r--drivers/media/i2c/ak881x.c4
-rw-r--r--drivers/media/i2c/alvium-csi2.c71
-rw-r--r--drivers/media/i2c/alvium-csi2.h16
-rw-r--r--drivers/media/i2c/ar0521.c39
-rw-r--r--drivers/media/i2c/bt819.c6
-rw-r--r--drivers/media/i2c/bt856.c2
-rw-r--r--drivers/media/i2c/bt866.c2
-rw-r--r--drivers/media/i2c/ccs-pll.c69
-rw-r--r--drivers/media/i2c/ccs-pll.h29
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c94
-rw-r--r--drivers/media/i2c/ccs/ccs-data.c15
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c11
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.h3
-rw-r--r--drivers/media/i2c/ccs/ccs.h2
-rw-r--r--drivers/media/i2c/cs3308.c2
-rw-r--r--drivers/media/i2c/cs5345.c2
-rw-r--r--drivers/media/i2c/cs53l32a.c2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c6
-rw-r--r--drivers/media/i2c/ds90ub913.c152
-rw-r--r--drivers/media/i2c/ds90ub953.c330
-rw-r--r--drivers/media/i2c/ds90ub953.h104
-rw-r--r--drivers/media/i2c/ds90ub960.c2353
-rw-r--r--drivers/media/i2c/dw9714.c66
-rw-r--r--drivers/media/i2c/dw9719.c221
-rw-r--r--drivers/media/i2c/dw9768.c19
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c36
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c9
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_reg.h1
-rw-r--r--drivers/media/i2c/gc0308.c7
-rw-r--r--drivers/media/i2c/gc0310.c783
-rw-r--r--drivers/media/i2c/gc05a2.c1347
-rw-r--r--drivers/media/i2c/gc08a3.c1327
-rw-r--r--drivers/media/i2c/gc2145.c136
-rw-r--r--drivers/media/i2c/hi556.c181
-rw-r--r--drivers/media/i2c/hi846.c15
-rw-r--r--drivers/media/i2c/hi847.c84
-rw-r--r--drivers/media/i2c/imx111.c1610
-rw-r--r--drivers/media/i2c/imx208.c93
-rw-r--r--drivers/media/i2c/imx214.c1486
-rw-r--r--drivers/media/i2c/imx219.c425
-rw-r--r--drivers/media/i2c/imx258.c1487
-rw-r--r--drivers/media/i2c/imx274.c7
-rw-r--r--drivers/media/i2c/imx283.c1620
-rw-r--r--drivers/media/i2c/imx290.c150
-rw-r--r--drivers/media/i2c/imx296.c7
-rw-r--r--drivers/media/i2c/imx319.c93
-rw-r--r--drivers/media/i2c/imx334.c1052
-rw-r--r--drivers/media/i2c/imx335.c549
-rw-r--r--drivers/media/i2c/imx355.c100
-rw-r--r--drivers/media/i2c/imx412.c60
-rw-r--r--drivers/media/i2c/imx415.c189
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c8
-rw-r--r--drivers/media/i2c/isl7998x.c4
-rw-r--r--drivers/media/i2c/ks0127.c14
-rw-r--r--drivers/media/i2c/lm3560.c4
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/lt6911uxe.c707
-rw-r--r--drivers/media/i2c/m52790.c2
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max9286.c191
-rw-r--r--drivers/media/i2c/max96714.c1017
-rw-r--r--drivers/media/i2c/max96717.c1104
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/i2c/msp3400-driver.c2
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c2
-rw-r--r--drivers/media/i2c/mt9m001.c7
-rw-r--r--drivers/media/i2c/mt9m111.c11
-rw-r--r--drivers/media/i2c/mt9m114.c252
-rw-r--r--drivers/media/i2c/mt9p031.c121
-rw-r--r--drivers/media/i2c/mt9t112.c13
-rw-r--r--drivers/media/i2c/mt9v011.c2
-rw-r--r--drivers/media/i2c/mt9v032.c105
-rw-r--r--drivers/media/i2c/mt9v111.c24
-rw-r--r--drivers/media/i2c/og01a1b.c254
-rw-r--r--drivers/media/i2c/og0ve1b.c816
-rw-r--r--drivers/media/i2c/ov01a10.c8
-rw-r--r--drivers/media/i2c/ov02a10.c45
-rw-r--r--drivers/media/i2c/ov02c10.c1022
-rw-r--r--drivers/media/i2c/ov02e10.c956
-rw-r--r--drivers/media/i2c/ov08d10.c82
-rw-r--r--drivers/media/i2c/ov08x40.c1650
-rw-r--r--drivers/media/i2c/ov13858.c73
-rw-r--r--drivers/media/i2c/ov13b10.c283
-rw-r--r--drivers/media/i2c/ov2640.c2
-rw-r--r--drivers/media/i2c/ov2659.c12
-rw-r--r--drivers/media/i2c/ov2680.c31
-rw-r--r--drivers/media/i2c/ov2685.c16
-rw-r--r--drivers/media/i2c/ov2735.c1109
-rw-r--r--drivers/media/i2c/ov2740.c171
-rw-r--r--drivers/media/i2c/ov4689.c15
-rw-r--r--drivers/media/i2c/ov5640.c20
-rw-r--r--drivers/media/i2c/ov5645.c299
-rw-r--r--drivers/media/i2c/ov5647.c22
-rw-r--r--drivers/media/i2c/ov5648.c10
-rw-r--r--drivers/media/i2c/ov5670.c114
-rw-r--r--drivers/media/i2c/ov5675.c114
-rw-r--r--drivers/media/i2c/ov5693.c37
-rw-r--r--drivers/media/i2c/ov5695.c16
-rw-r--r--drivers/media/i2c/ov6211.c793
-rw-r--r--drivers/media/i2c/ov64a40.c19
-rw-r--r--drivers/media/i2c/ov6650.c1149
-rw-r--r--drivers/media/i2c/ov7251.c37
-rw-r--r--drivers/media/i2c/ov7640.c2
-rw-r--r--drivers/media/i2c/ov772x.c4
-rw-r--r--drivers/media/i2c/ov7740.c15
-rw-r--r--drivers/media/i2c/ov8856.c102
-rw-r--r--drivers/media/i2c/ov8858.c15
-rw-r--r--drivers/media/i2c/ov8865.c53
-rw-r--r--drivers/media/i2c/ov9282.c40
-rw-r--r--drivers/media/i2c/ov9640.c7
-rw-r--r--drivers/media/i2c/ov9650.c11
-rw-r--r--drivers/media/i2c/ov9734.c84
-rw-r--r--drivers/media/i2c/rdacm20.c7
-rw-r--r--drivers/media/i2c/rdacm21.c7
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c19
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c34
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/s5k5baf.c25
-rw-r--r--drivers/media/i2c/s5k6a3.c20
-rw-r--r--drivers/media/i2c/saa6588.c2
-rw-r--r--drivers/media/i2c/saa6752hs.c4
-rw-r--r--drivers/media/i2c/saa7110.c2
-rw-r--r--drivers/media/i2c/saa7115.c14
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c4
-rw-r--r--drivers/media/i2c/saa7185.c2
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c124
-rw-r--r--drivers/media/i2c/st-vgxy61.c1895
-rw-r--r--drivers/media/i2c/tc358743.c289
-rw-r--r--drivers/media/i2c/tc358743_regs.h57
-rw-r--r--drivers/media/i2c/tc358746.c264
-rw-r--r--drivers/media/i2c/tda1997x.c14
-rw-r--r--drivers/media/i2c/tda7432.c2
-rw-r--r--drivers/media/i2c/tda9840.c4
-rw-r--r--drivers/media/i2c/tea6415c.c4
-rw-r--r--drivers/media/i2c/tea6420.c4
-rw-r--r--drivers/media/i2c/thp7312.c13
-rw-r--r--drivers/media/i2c/ths7303.c8
-rw-r--r--drivers/media/i2c/ths8200.c4
-rw-r--r--drivers/media/i2c/tlv320aic23b.c4
-rw-r--r--drivers/media/i2c/tvaudio.c8
-rw-r--r--drivers/media/i2c/tvp5150.c6
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/i2c/tw2804.c2
-rw-r--r--drivers/media/i2c/tw9900.c2
-rw-r--r--drivers/media/i2c/tw9903.c2
-rw-r--r--drivers/media/i2c/tw9906.c2
-rw-r--r--drivers/media/i2c/tw9910.c7
-rw-r--r--drivers/media/i2c/uda1342.c3
-rw-r--r--drivers/media/i2c/upd64031a.c4
-rw-r--r--drivers/media/i2c/upd64083.c4
-rw-r--r--drivers/media/i2c/vd55g1.c2061
-rw-r--r--drivers/media/i2c/vd56g3.c1582
-rw-r--r--drivers/media/i2c/vgxy61.c1911
-rw-r--r--drivers/media/i2c/video-i2c.c20
-rw-r--r--drivers/media/i2c/vp27smpx.c4
-rw-r--r--drivers/media/i2c/vpx3220.c6
-rw-r--r--drivers/media/i2c/wm8739.c4
-rw-r--r--drivers/media/i2c/wm8775.c4
-rw-r--r--drivers/media/mc/mc-devnode.c7
-rw-r--r--drivers/media/mc/mc-entity.c20
-rw-r--r--drivers/media/mc/mc-request.c58
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile2
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c17
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c9
-rw-r--r--drivers/media/pci/bt8xx/bt878.h3
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c20
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c8
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c8
-rw-r--r--drivers/media/pci/bt8xx/bttv.h2
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c62
-rw-r--r--drivers/media/pci/cx18/cx18-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-audio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c14
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.h2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c11
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h16
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c17
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.h2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c17
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h3
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.h2
-rw-r--r--drivers/media/pci/cx18/cx18-io.c2
-rw-r--r--drivers/media/pci/cx18/cx18-io.h2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c92
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h10
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c15
-rw-r--r--drivers/media/pci/cx18/cx18-queue.h2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.c2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.h4
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.h2
-rw-r--r--drivers/media/pci/cx18/cx18-version.h2
-rw-r--r--drivers/media/pci/cx18/cx18-video.c2
-rw-r--r--drivers/media/pci/cx18/cx18-video.h2
-rw-r--r--drivers/media/pci/cx18/cx23418.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c33
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c29
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.h1
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c3
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c94
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c102
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/Kconfig8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.c8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h9
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c95
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.h11
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-cpd.c30
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c217
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h30
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-fw-com.c40
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c118
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c120
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.h11
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c40
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c72
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c101
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-mmu.c314
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-mmu.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c52
-rw-r--r--drivers/media/pci/intel/ivsc/Kconfig1
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c8
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c113
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c121
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c108
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c142
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.h8
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c16
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c21
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c10
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c14
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c7
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_common.h2
-rw-r--r--drivers/media/pci/mantis/mantis_core.h43
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c4
-rw-r--r--drivers/media/pci/mantis/mantis_dma.h2
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c12
-rw-r--r--drivers/media/pci/mgb4/mgb4_cmt.c10
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c30
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h15
-rw-r--r--drivers/media/pci/mgb4/mgb4_io.h29
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.c1
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_in.c12
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_out.c9
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.c7
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c298
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.h8
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c345
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.h6
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c6
-rw-r--r--drivers/media/pci/ngene/ngene-core.c22
-rw-r--r--drivers/media/pci/ngene/ngene.h5
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c20
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c28
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c30
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c27
-rw-r--r--drivers/media/pci/saa7164/saa7164.h12
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c18
-rw-r--r--drivers/media/pci/smipcie/smipcie.h3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c6
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c20
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-p2m.c8
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig16
-rw-r--r--drivers/media/pci/sta2x11/Makefile2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1272
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.h29
-rw-r--r--drivers/media/pci/ttpci/budget-av.c3
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c27
-rw-r--r--drivers/media/pci/ttpci/budget-core.c11
-rw-r--r--drivers/media/pci/ttpci/budget.h5
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c15
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c15
-rw-r--r--drivers/media/pci/tw5864/tw5864.h7
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-reg.h2
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c4
-rw-r--r--drivers/media/pci/tw68/tw68.h2
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c4
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c2
-rw-r--r--drivers/media/pci/zoran/zoran.h6
-rw-r--r--drivers/media/pci/zoran/zoran_card.c6
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c37
-rw-r--r--drivers/media/pci/zoran/zr36016.c2
-rw-r--r--drivers/media/pci/zoran/zr36050.c2
-rw-r--r--drivers/media/pci/zoran/zr36060.c2
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c186
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.h7
-rw-r--r--drivers/media/platform/amlogic/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/Makefile2
-rw-r--r--drivers/media/platform/amlogic/c3/Kconfig5
-rw-r--r--drivers/media/platform/amlogic/c3/Makefile5
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig19
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Makefile10
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c804
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-common.h340
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-core.c641
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c421
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c930
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h618
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c892
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c326
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c842
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c827
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c34
-rw-r--r--drivers/media/platform/amphion/vdec.c300
-rw-r--r--drivers/media/platform/amphion/venc.c22
-rw-r--r--drivers/media/platform/amphion/vpu.h11
-rw-r--r--drivers/media/platform/amphion/vpu_color.c73
-rw-r--r--drivers/media/platform/amphion/vpu_core.c49
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c15
-rw-r--r--drivers/media/platform/amphion/vpu_defs.h13
-rw-r--r--drivers/media/platform/amphion/vpu_drv.c30
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.c123
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.h12
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c70
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.c4
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.h1
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c69
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.h19
-rw-r--r--drivers/media/platform/amphion/vpu_windsor.c2
-rw-r--r--drivers/media/platform/arm/Kconfig5
-rw-r--r--drivers/media/platform/arm/Makefile2
-rw-r--r--drivers/media/platform/arm/mali-c55/Kconfig18
-rw-r--r--drivers/media/platform/arm/mali-c55/Makefile11
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-capture.c959
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-common.h310
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-core.c917
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-isp.c665
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-params.c819
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-registers.h449
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-resizer.c1156
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-stats.c323
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-tpg.c437
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c203
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c20
-rw-r--r--drivers/media/platform/broadcom/bcm2835-unicam.c46
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c212
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c2
-rw-r--r--drivers/media/platform/chips-media/coda/coda-bit.c4
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c59
-rw-r--r--drivers/media/platform/chips-media/coda/coda-jpeg.c6
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c55
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h7
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c425
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-regdefine.h5
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vdi.c27
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c402
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c341
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c97
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h10
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.c43
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.h4
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h43
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5.h9
-rw-r--r--drivers/media/platform/imagination/Kconfig14
-rw-r--r--drivers/media/platform/imagination/Makefile3
-rw-r--r--drivers/media/platform/imagination/e5010-core-regs.h585
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc-hw.c267
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc-hw.h42
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c1630
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.h173
-rw-r--r--drivers/media/platform/imagination/e5010-mmu-regs.h311
-rw-r--r--drivers/media/platform/intel/pxa_camera.c19
-rw-r--r--drivers/media/platform/m2m-deinterlace.c41
-rw-r--r--drivers/media/platform/marvell/cafe-driver.c2
-rw-r--r--drivers/media/platform/marvell/mcam-core.c20
-rw-r--r--drivers/media/platform/marvell/mcam-core.h3
-rw-r--r--drivers/media/platform/marvell/mmp-driver.c24
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c69
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c88
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h8
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c37
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h7
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_core.c2
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c33
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c280
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c77
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h3
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c540
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c42
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c4
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c3
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c97
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c23
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h9
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c667
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c7
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c12
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c53
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c23
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h6
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c5
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/microchip/microchip-csi2dc.c2
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c21
-rw-r--r--drivers/media/platform/microchip/microchip-sama5d2-isc.c23
-rw-r--r--drivers/media/platform/microchip/microchip-sama7g5-isc.c23
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c33
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c19
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/iommu.c7
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/trace.h2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/v4l2.c49
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/vde.c2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/vde.h1
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c30
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h1
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c268
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h6
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c481
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c21
-rw-r--r--drivers/media/platform/nxp/imx-pxp.h9
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c5
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c167
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h22
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c18
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c22
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c286
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c161
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c200
-rw-r--r--drivers/media/platform/nxp/mx2_emmaprp.c35
-rw-r--r--drivers/media/platform/qcom/Kconfig1
-rw-r--r--drivers/media/platform/qcom/Makefile1
-rw-r--r--drivers/media/platform/qcom/camss/Makefile8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-340.c190
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-1.c151
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-7.c202
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-680.c422
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c467
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.c351
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.h25
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c650
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h88
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c1200
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c139
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h40
-rw-r--r--drivers/media/platform/qcom/camss/camss-format.c91
-rw-r--r--drivers/media/platform/qcom/camss/camss-format.h62
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c13
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-17x.c120
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-340.c320
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c35
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c274
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-680.c244
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen1.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen3.c193
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-vbif.c31
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-vbif.h19
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c810
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h86
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c384
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.h4
-rw-r--r--drivers/media/platform/qcom/camss/camss.c3443
-rw-r--r--drivers/media/platform/qcom/camss/camss.h44
-rw-r--r--drivers/media/platform/qcom/iris/Kconfig13
-rw-r--r--drivers/media/platform/qcom/iris/Makefile32
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c795
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h123
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.c235
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c98
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h125
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c917
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h37
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c109
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.c176
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h156
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1.h16
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c1089
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h551
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c709
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2.h41
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c1216
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h197
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c292
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h125
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c984
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c317
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.h182
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h112
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h266
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen1.c417
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c1080
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h23
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sc7280.h26
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8650.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8750.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.c140
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c397
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.c131
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c277
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h146
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c127
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h55
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c343
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.h19
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c515
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h25
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.c616
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.h27
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c718
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c47
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c473
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c1555
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h153
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c389
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h37
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_register_defines.h17
-rw-r--r--drivers/media/platform/qcom/venus/Kconfig3
-rw-r--r--drivers/media/platform/qcom/venus/core.c294
-rw-r--r--drivers/media/platform/qcom/venus/core.h42
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c67
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c8
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h18
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h20
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c94
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c104
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c20
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c188
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c59
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h4
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c151
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c73
-rw-r--r--drivers/media/platform/qcom/venus/vdec.h1
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c5
-rw-r--r--drivers/media/platform/qcom/venus/venc.c124
-rw-r--r--drivers/media/platform/qcom/venus/venc.h1
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c140
-rw-r--r--drivers/media/platform/raspberrypi/Kconfig6
-rw-r--r--drivers/media/platform/raspberrypi/Makefile4
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/Kconfig14
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/Makefile6
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be.c1796
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be_formats.h519
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/Kconfig15
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/Makefile6
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe-fmts.h332
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe-trace.h202
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c2506
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.h43
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.c586
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.h89
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/dphy.c181
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/dphy.h27
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/pisp-fe.c605
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/pisp-fe.h53
-rw-r--r--drivers/media/platform/renesas/Kconfig19
-rw-r--r--drivers/media/platform/renesas/Makefile3
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c1176
-rw-r--r--drivers/media/platform/renesas/rcar-fcp.c38
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c532
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Makefile4
-rw-r--r--drivers/media/platform/renesas/rcar-isp/csisp.c593
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c734
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c309
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c552
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h61
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c19
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c37
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c51
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c14
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c151
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h111
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h68
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c252
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c180
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c593
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/Makefile5
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-dev.c251
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-subdev.c376
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c531
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h130
-rw-r--r--drivers/media/platform/renesas/sh_vou.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h5
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_brx.c40
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_clu.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c32
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c58
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.h10
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c111
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c96
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h51
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hgo.c28
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hgt.c20
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c84
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hsit.c17
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.c121
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.h29
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lif.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lut.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c293
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.h54
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c96
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.c77
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.h3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_sru.c46
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uds.c60
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uif.c15
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c290
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c634
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.h16
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c96
-rw-r--r--drivers/media/platform/rockchip/Kconfig2
-rw-r--r--drivers/media/platform/rockchip/Makefile2
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c16
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c46
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h10
-rw-r--r--drivers/media/platform/rockchip/rkcif/Kconfig18
-rw-r--r--drivers/media/platform/rockchip/rkcif/Makefile8
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-dvp.c865
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-dvp.h25
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-mipi.c777
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-mipi.h23
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-common.h250
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-dev.c303
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-interface.c442
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-interface.h31
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-regs.h153
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-stream.c636
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-stream.h32
-rw-r--r--drivers/media/platform/rockchip/rkisp1/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c6
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.c14
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h69
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c9
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c143
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c50
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c1122
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h129
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c14
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c53
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Kconfig (renamed from drivers/staging/media/rkvdec/Kconfig)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Makefile3
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c (renamed from drivers/staging/media/rkvdec/rkvdec-h264.c)64
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-hevc-data.c1848
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c820
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h (renamed from drivers/staging/media/rkvdec/rkvdec-regs.h)4
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c (renamed from drivers/staging/media/rkvdec/rkvdec-vp9.c)4
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c1411
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h161
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.c12
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h6
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c39
-rw-r--r--drivers/media/platform/samsung/exynos4-is/common.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c8
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c12
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c131
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c4
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-param.c9
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-param.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c14
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c21
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c43
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c12
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c40
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-core.c15
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c48
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c61
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c5
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.h1
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c19
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.h4
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c48
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c35
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h7
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c36
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c40
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c10
-rw-r--r--drivers/media/platform/st/Makefile1
-rw-r--r--drivers/media/platform/st/sti/Kconfig1
-rw-r--r--drivers/media/platform/st/sti/Makefile1
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-debug.c8
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c58
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Kconfig28
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Makefile11
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c262
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h60
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c1169
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h287
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c244
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h23
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c235
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h17
-rw-r--r--drivers/media/platform/st/sti/delta/delta-debug.c8
-rw-r--r--drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c20
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c47
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c42
-rw-r--r--drivers/media/platform/st/sti/hva/hva.h2
-rw-r--r--drivers/media/platform/st/stm32/Kconfig14
-rw-r--r--drivers/media/platform/st/stm32/Makefile1
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c42
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c1141
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c26
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/Makefile2
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c134
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c129
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c133
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c540
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c440
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c14
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c18
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c2
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c2
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c16
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c16
-rw-r--r--drivers/media/platform/synopsys/Kconfig3
-rw-r--r--drivers/media/platform/synopsys/Makefile2
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Kconfig36
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Makefile4
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c2746
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h396
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c275
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h43
-rw-r--r--drivers/media/platform/ti/Kconfig3
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c18
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c271
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c159
-rw-r--r--drivers/media/platform/ti/cal/cal.c62
-rw-r--r--drivers/media/platform/ti/cal/cal.h4
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c6
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c22
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c8
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c79
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c10
-rw-r--r--drivers/media/platform/ti/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c68
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.h2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c16
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isphist.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c13
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c37
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.h6
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.c33
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.h3
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c32
-rw-r--r--drivers/media/platform/verisilicon/Kconfig9
-rw-r--r--drivers/media/platform/verisilicon/Makefile14
-rw-r--r--drivers/media/platform/verisilicon/hantro.h15
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c65
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_regs.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c115
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c38
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_regs.h17
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c10
-rw-r--r--drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_h264.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_hevc.c8
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h39
-rw-r--r--drivers/media/platform/verisilicon/hantro_jpeg.c129
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c44
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c76
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c32
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c2
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c2
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c19
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_regs.h10
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c42
-rw-r--r--drivers/media/platform/via/via-camera.c4
-rw-r--r--drivers/media/platform/video-mux.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c16
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c2
-rw-r--r--drivers/media/radio/Kconfig21
-rw-r--r--drivers/media/radio/Makefile2
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-cadet.c4
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-raremono.c6
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-si476x.c2
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-timb.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2159
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/radio/saa7706h.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c1
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c12
-rw-r--r--drivers/media/radio/si4713/si4713.c4
-rw-r--r--drivers/media/radio/tef6862.c4
-rw-r--r--drivers/media/radio/wl128x/Kconfig15
-rw-r--r--drivers/media/radio/wl128x/Makefile7
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h228
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c1675
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.h389
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c820
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.h45
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.c413
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.h24
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c604
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.h20
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/rc/ati_remote.c6
-rw-r--r--drivers/media/rc/ene_ir.c7
-rw-r--r--drivers/media/rc/gpio-ir-recv.c6
-rw-r--r--drivers/media/rc/gpio-ir-tx.c4
-rw-r--r--drivers/media/rc/igorplugusb.c6
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c2
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c9
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c4
-rw-r--r--drivers/media/rc/imon.c106
-rw-r--r--drivers/media/rc/imon_raw.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c3
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c7
-rw-r--r--drivers/media/rc/ir-spi.c40
-rw-r--r--drivers/media/rc/ir_toy.c2
-rw-r--r--drivers/media/rc/ite-cir.c1
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c42
-rw-r--r--drivers/media/rc/keymaps/rc-mygica-utv3.c69
-rw-r--r--drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c71
-rw-r--r--drivers/media/rc/lirc_dev.c23
-rw-r--r--drivers/media/rc/mceusb.c18
-rw-r--r--drivers/media/rc/meson-ir.c29
-rw-r--r--drivers/media/rc/mtk-cir.c2
-rw-r--r--drivers/media/rc/pwm-ir-tx.c8
-rw-r--r--drivers/media/rc/rc-core-priv.h4
-rw-r--r--drivers/media/rc/rc-ir-raw.c5
-rw-r--r--drivers/media/rc/rc-loopback.c1
-rw-r--r--drivers/media/rc/rc-main.c11
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/rc/serial_ir.c2
-rw-r--r--drivers/media/rc/st_rc.c4
-rw-r--r--drivers/media/rc/streamzap.c70
-rw-r--r--drivers/media/rc/sunxi-cir.c2
-rw-r--r--drivers/media/spi/gs1662.c8
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c67
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c10
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c5
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.c2
-rw-r--r--drivers/media/test-drivers/vim2m.c364
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c12
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.c25
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.h14
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c6
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c201
-rw-r--r--drivers/media/test-drivers/vimc/vimc-lens.c5
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c134
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c125
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c6
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c19
-rw-r--r--drivers/media/test-drivers/visl/visl-dec.c2
-rw-r--r--drivers/media/test-drivers/visl/visl-video.c22
-rw-r--r--drivers/media/test-drivers/visl/visl.h7
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig13
-rw-r--r--drivers/media/test-drivers/vivid/Makefile5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c144
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c374
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h131
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c303
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c117
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.c24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.h19
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c38
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c19
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c180
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h26
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c142
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h13
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c62
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h16
-rw-r--r--drivers/media/tuners/e4000.c2
-rw-r--r--drivers/media/tuners/fc0013.c64
-rw-r--r--drivers/media/tuners/fc0013.h11
-rw-r--r--drivers/media/tuners/fc2580.c2
-rw-r--r--drivers/media/tuners/it913x.c2
-rw-r--r--drivers/media/tuners/m88rs6000t.c2
-rw-r--r--drivers/media/tuners/mt2060.c2
-rw-r--r--drivers/media/tuners/mt2063.c2
-rw-r--r--drivers/media/tuners/mxl301rf.c4
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.c2
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/tda18212.c2
-rw-r--r--drivers/media/tuners/tda18250.c2
-rw-r--r--drivers/media/tuners/tda18271-fe.c4
-rw-r--r--drivers/media/tuners/tda9887.c1
-rw-r--r--drivers/media/tuners/tea5761.c4
-rw-r--r--drivers/media/tuners/tea5767.c4
-rw-r--r--drivers/media/tuners/tua9001.c2
-rw-r--r--drivers/media/tuners/tuner-i2c.h4
-rw-r--r--drivers/media/tuners/tuner-simple.c20
-rw-r--r--drivers/media/tuners/tuner-types.c313
-rw-r--r--drivers/media/tuners/xc2028.c2
-rw-r--r--drivers/media/tuners/xc4000.c10
-rw-r--r--drivers/media/tuners/xc5000.c14
-rw-r--r--drivers/media/usb/airspy/airspy.c4
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c6
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c23
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c7
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c23
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c60
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h18
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h5
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c20
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c19
-rw-r--r--drivers/media/usb/dvb-usb-v2/au6610.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c4
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c6
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c9
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c1
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c1
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c9
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c16
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c6
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c8
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c13
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/usb/em28xx/Kconfig1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c52
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c1
-rw-r--r--drivers/media/usb/go7007/go7007-i2c.c30
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c1
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/go7007/s2250-board.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c20
-rw-r--r--drivers/media/usb/gspca/ov534.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c7
-rw-r--r--drivers/media/usb/gspca/vicam.c10
-rw-r--r--drivers/media/usb/hackrf/hackrf.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c30
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c69
-rw-r--r--drivers/media/usb/msi2500/msi2500.c8
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.c5
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c31
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c167
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c109
-rw-r--r--drivers/media/usb/pwc/pwc-if.c5
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c6
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c48
-rw-r--r--drivers/media/usb/stk1160/stk1160.h7
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c21
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c6
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c1246
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c1
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c732
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c4
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c163
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c212
-rw-r--r--drivers/media/usb/uvc/uvc_status.c78
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c691
-rw-r--r--drivers/media/usb/uvc/uvc_video.c340
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h139
-rw-r--r--drivers/media/v4l2-core/Kconfig4
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-cci.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c272
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c135
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c322
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c97
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c243
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c44
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c535
-rw-r--r--drivers/media/v4l2-core/v4l2-isp.c132
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c198
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c71
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c544
-rw-r--r--drivers/memory/Kconfig25
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/atmel-ebi.c35
-rw-r--r--drivers/memory/brcmstb_dpfe.c2
-rw-r--r--drivers/memory/brcmstb_memc.c58
-rw-r--r--drivers/memory/bt1-l2-ctl.c2
-rw-r--r--drivers/memory/emif.c34
-rw-r--r--drivers/memory/fsl-corenet-cf.c2
-rw-r--r--drivers/memory/fsl_ifc.c2
-rw-r--r--drivers/memory/jz4780-nemc.c2
-rw-r--r--drivers/memory/mtk-smi.c128
-rw-r--r--drivers/memory/omap-gpmc.c100
-rw-r--r--drivers/memory/pl172.c58
-rw-r--r--drivers/memory/pl353-smc.c57
-rw-r--r--drivers/memory/renesas-rpc-if-regs.h147
-rw-r--r--drivers/memory/renesas-rpc-if.c750
-rw-r--r--drivers/memory/renesas-xspi-if-regs.h105
-rw-r--r--drivers/memory/samsung/exynos-srom.c10
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c92
-rw-r--r--drivers/memory/stm32-fmc2-ebi.c25
-rw-r--r--drivers/memory/stm32_omm.c470
-rw-r--r--drivers/memory/tegra/Kconfig8
-rw-r--r--drivers/memory/tegra/Makefile2
-rw-r--r--drivers/memory/tegra/mc.c16
-rw-r--r--drivers/memory/tegra/mc.h9
-rw-r--r--drivers/memory/tegra/tegra124-emc.c147
-rw-r--r--drivers/memory/tegra/tegra186-emc.c47
-rw-r--r--drivers/memory/tegra/tegra186.c17
-rw-r--r--drivers/memory/tegra/tegra20-emc.c169
-rw-r--r--drivers/memory/tegra/tegra210-emc-cc-r21021.c429
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c11
-rw-r--r--drivers/memory/tegra/tegra210.c146
-rw-r--r--drivers/memory/tegra/tegra264-bwmgr.h50
-rw-r--r--drivers/memory/tegra/tegra264.c313
-rw-r--r--drivers/memory/tegra/tegra30-emc.c126
-rw-r--r--drivers/memory/ti-aemif.c266
-rw-r--r--drivers/memory/ti-emif-pm.c2
-rw-r--r--drivers/memstick/core/memstick.c68
-rw-r--r--drivers/memstick/core/ms_block.c23
-rw-r--r--drivers/memstick/core/mspro_block.c16
-rw-r--r--drivers/memstick/host/Kconfig10
-rw-r--r--drivers/memstick/host/Makefile1
-rw-r--r--drivers/memstick/host/jmb38x_ms.c7
-rw-r--r--drivers/memstick/host/r592.c8
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c638
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c11
-rw-r--r--drivers/memstick/host/tifm_ms.c9
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h60
-rw-r--r--drivers/message/fusion/mptbase.c13
-rw-r--r--drivers/message/fusion/mptbase.h3
-rw-r--r--drivers/message/fusion/mptctl.c3
-rw-r--r--drivers/message/fusion/mptfc.c23
-rw-r--r--drivers/message/fusion/mptlan.h3
-rw-r--r--drivers/message/fusion/mptsas.c28
-rw-r--r--drivers/message/fusion/mptscsih.c76
-rw-r--r--drivers/message/fusion/mptscsih.h8
-rw-r--r--drivers/message/fusion/mptspi.c21
-rw-r--r--drivers/mfd/88pm800.c4
-rw-r--r--drivers/mfd/88pm805.c6
-rw-r--r--drivers/mfd/88pm860x-core.c17
-rw-r--r--drivers/mfd/88pm886.c154
-rw-r--r--drivers/mfd/Kconfig392
-rw-r--r--drivers/mfd/Makefile42
-rw-r--r--drivers/mfd/aat2870-core.c6
-rw-r--r--drivers/mfd/ab8500-core.c5
-rw-r--r--drivers/mfd/ab8500-sysctrl.c2
-rw-r--r--drivers/mfd/act8945a.c2
-rw-r--r--drivers/mfd/adp5585.c843
-rw-r--r--drivers/mfd/altera-sysmgr.c2
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/arizona-irq.c9
-rw-r--r--drivers/mfd/arizona-spi.c9
-rw-r--r--drivers/mfd/as3722.c8
-rw-r--r--drivers/mfd/atc260x-core.c4
-rw-r--r--drivers/mfd/atmel-flexcom.c2
-rw-r--r--drivers/mfd/atmel-smc.c13
-rw-r--r--drivers/mfd/axp20x-i2c.c25
-rw-r--r--drivers/mfd/axp20x.c100
-rw-r--r--drivers/mfd/bcm2835-pm.c1
-rw-r--r--drivers/mfd/bcm590xx.c66
-rw-r--r--drivers/mfd/bd9571mwv.c6
-rw-r--r--drivers/mfd/bq257xx.c99
-rw-r--r--drivers/mfd/cgbc-core.c428
-rw-r--r--drivers/mfd/cros_ec_dev.c70
-rw-r--r--drivers/mfd/cs40l50-core.c569
-rw-r--r--drivers/mfd/cs40l50-i2c.c68
-rw-r--r--drivers/mfd/cs40l50-spi.c68
-rw-r--r--drivers/mfd/cs42l43-i2c.c10
-rw-r--r--drivers/mfd/cs42l43-sdw.c12
-rw-r--r--drivers/mfd/cs42l43.c119
-rw-r--r--drivers/mfd/cs42l43.h1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/da9052-spi.c2
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/da9062-core.c12
-rw-r--r--drivers/mfd/da9063-i2c.c30
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/ene-kb3930.c2
-rw-r--r--drivers/mfd/exynos-lpass.c34
-rw-r--r--drivers/mfd/ezx-pcap.c33
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c10
-rw-r--r--drivers/mfd/gateworks-gsc.c4
-rw-r--r--drivers/mfd/hi655x-pmic.c10
-rw-r--r--drivers/mfd/intel-lpss-acpi.c4
-rw-r--r--drivers/mfd/intel-lpss-pci.c218
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel-m10-bmc-core.c10
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c5
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c5
-rw-r--r--drivers/mfd/intel_pmc_bxt.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c195
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c14
-rw-r--r--drivers/mfd/intel_soc_pmic_mrfld.c3
-rw-r--r--drivers/mfd/ioc3.c2
-rw-r--r--drivers/mfd/ipaq-micro.c4
-rw-r--r--drivers/mfd/iqs62x.c2
-rw-r--r--drivers/mfd/kempld-core.c225
-rw-r--r--drivers/mfd/lm3533-core.c28
-rw-r--r--drivers/mfd/loongson-se.c253
-rw-r--r--drivers/mfd/lp3943.c2
-rw-r--r--drivers/mfd/lp873x.c4
-rw-r--r--drivers/mfd/lp87565.c4
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/lp8788.c2
-rw-r--r--drivers/mfd/lpc_ich.c3
-rw-r--r--drivers/mfd/ls2k-bmc-core.c532
-rw-r--r--drivers/mfd/macsmc.c499
-rw-r--r--drivers/mfd/madera-core.c4
-rw-r--r--drivers/mfd/madera-spi.c9
-rw-r--r--drivers/mfd/max14577.c4
-rw-r--r--drivers/mfd/max7360.c171
-rw-r--r--drivers/mfd/max77541.c2
-rw-r--r--drivers/mfd/max77620.c25
-rw-r--r--drivers/mfd/max77705.c180
-rw-r--r--drivers/mfd/max77759.c690
-rw-r--r--drivers/mfd/max8907.c2
-rw-r--r--drivers/mfd/max8925-core.c6
-rw-r--r--drivers/mfd/max8925-i2c.c5
-rw-r--r--drivers/mfd/max8997-irq.c19
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/max8998-irq.c2
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mc13xxx-spi.c2
-rw-r--r--drivers/mfd/mcp-core.c2
-rw-r--r--drivers/mfd/mcp-sa11x0.c2
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c7
-rw-r--r--drivers/mfd/mt6358-irq.c6
-rw-r--r--drivers/mfd/mt6360-core.c23
-rw-r--r--drivers/mfd/mt6370.c2
-rw-r--r--drivers/mfd/mt6370.h2
-rw-r--r--drivers/mfd/mt6397-core.c64
-rw-r--r--drivers/mfd/mt6397-irq.c30
-rw-r--r--drivers/mfd/mxs-lradc.c4
-rw-r--r--drivers/mfd/nct6694.c388
-rw-r--r--drivers/mfd/ntxec.c2
-rw-r--r--drivers/mfd/ocelot-core.c6
-rw-r--r--drivers/mfd/ocelot-spi.c9
-rw-r--r--drivers/mfd/omap-usb-host.c3
-rw-r--r--drivers/mfd/omap-usb-tll.c9
-rw-r--r--drivers/mfd/pcf50633-adc.c255
-rw-r--r--drivers/mfd/pcf50633-core.c304
-rw-r--r--drivers/mfd/pcf50633-gpio.c91
-rw-r--r--drivers/mfd/pcf50633-irq.c312
-rw-r--r--drivers/mfd/pf1550.c367
-rw-r--r--drivers/mfd/qcom-pm8008.c170
-rw-r--r--drivers/mfd/qcom-pm8xxx.c8
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c5
-rw-r--r--drivers/mfd/qnap-mcu.c417
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/retu-mfd.c16
-rw-r--r--drivers/mfd/rk8xx-core.c128
-rw-r--r--drivers/mfd/rk8xx-i2c.c70
-rw-r--r--drivers/mfd/rohm-bd71828.c108
-rw-r--r--drivers/mfd/rohm-bd718x7.c11
-rw-r--r--drivers/mfd/rohm-bd9576.c6
-rw-r--r--drivers/mfd/rohm-bd96801.c793
-rw-r--r--drivers/mfd/rsmu_core.c2
-rw-r--r--drivers/mfd/rsmu_i2c.c107
-rw-r--r--drivers/mfd/rsmu_spi.c8
-rw-r--r--drivers/mfd/rt4831.c1
-rw-r--r--drivers/mfd/rt5033.c10
-rw-r--r--drivers/mfd/rz-mtu3.c2
-rw-r--r--drivers/mfd/sec-acpm.c421
-rw-r--r--drivers/mfd/sec-common.c301
-rw-r--r--drivers/mfd/sec-core.c458
-rw-r--r--drivers/mfd/sec-core.h23
-rw-r--r--drivers/mfd/sec-i2c.c239
-rw-r--r--drivers/mfd/sec-irq.c561
-rw-r--r--drivers/mfd/si476x-cmd.c2
-rw-r--r--drivers/mfd/simple-mfd-i2c.c45
-rw-r--r--drivers/mfd/sm501.c56
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c7
-rw-r--r--drivers/mfd/ssbi.c2
-rw-r--r--drivers/mfd/sta2x11-mfd.c645
-rw-r--r--drivers/mfd/stm32-lptimer.c34
-rw-r--r--drivers/mfd/stm32-timers.c34
-rw-r--r--drivers/mfd/stmfx.c5
-rw-r--r--drivers/mfd/stmpe-i2c.c14
-rw-r--r--drivers/mfd/stmpe-spi.c16
-rw-r--r--drivers/mfd/stmpe.c13
-rw-r--r--drivers/mfd/stpmic1.c6
-rw-r--r--drivers/mfd/stw481x.c4
-rw-r--r--drivers/mfd/sun4i-gpadc.c1
-rw-r--r--drivers/mfd/syscon.c192
-rw-r--r--drivers/mfd/tc3589x.c8
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c6
-rw-r--r--drivers/mfd/timberdale.c19
-rw-r--r--drivers/mfd/tps6105x.c6
-rw-r--r--drivers/mfd/tps65010.c28
-rw-r--r--drivers/mfd/tps6507x.c2
-rw-r--r--drivers/mfd/tps65086.c4
-rw-r--r--drivers/mfd/tps65090.c6
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps65218.c2
-rw-r--r--drivers/mfd/tps65219.c301
-rw-r--r--drivers/mfd/tps6586x.c10
-rw-r--r--drivers/mfd/tps65910.c6
-rw-r--r--drivers/mfd/tps65911-comparator.c2
-rw-r--r--drivers/mfd/tps65912-core.c23
-rw-r--r--drivers/mfd/tps65912-i2c.c10
-rw-r--r--drivers/mfd/tps65912-spi.c8
-rw-r--r--drivers/mfd/tps6594-core.c384
-rw-r--r--drivers/mfd/tps6594-i2c.c26
-rw-r--r--drivers/mfd/tps6594-spi.c26
-rw-r--r--drivers/mfd/tqmx86.c123
-rw-r--r--drivers/mfd/twl-core.c26
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-irq.c5
-rw-r--r--drivers/mfd/twl6030-irq.c80
-rw-r--r--drivers/mfd/twl6040.c10
-rw-r--r--drivers/mfd/ucb1x00-core.c5
-rw-r--r--drivers/mfd/upboard-fpga.c324
-rw-r--r--drivers/mfd/vexpress-sysreg.c73
-rw-r--r--drivers/mfd/wcd934x.c4
-rw-r--r--drivers/mfd/wl1273-core.c262
-rw-r--r--drivers/mfd/wm831x-irq.c13
-rw-r--r--drivers/mfd/wm8350-i2c.c6
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/mfd/wm8994-irq.c4
-rw-r--r--drivers/misc/Kconfig96
-rw-r--r--drivers/misc/Makefile13
-rw-r--r--drivers/misc/ad525x_dpot.c7
-rw-r--r--drivers/misc/altera-stapl/altera.c2
-rw-r--r--drivers/misc/amd-sbi/Kconfig23
-rw-r--r--drivers/misc/amd-sbi/Makefile4
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c592
-rw-r--r--drivers/misc/amd-sbi/rmi-core.h74
-rw-r--r--drivers/misc/amd-sbi/rmi-hwmon.c120
-rw-r--r--drivers/misc/amd-sbi/rmi-i2c.c233
-rw-r--r--drivers/misc/apds9802als.c2
-rw-r--r--drivers/misc/apds990x.c25
-rw-r--r--drivers/misc/atmel-ssc.c6
-rw-r--r--drivers/misc/bcm-vk/bcm_vk.h1
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_sg.c2
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c6
-rw-r--r--drivers/misc/bh1770glc.c18
-rw-r--r--drivers/misc/c2port/core.c27
-rw-r--r--drivers/misc/cardreader/Kconfig3
-rw-r--r--drivers/misc/cardreader/alcor_pci.c15
-rw-r--r--drivers/misc/cardreader/rts5227.c13
-rw-r--r--drivers/misc/cardreader/rts5228.c12
-rw-r--r--drivers/misc/cardreader/rts5249.c16
-rw-r--r--drivers/misc/cardreader/rts5264.c100
-rw-r--r--drivers/misc/cardreader/rts5264.h7
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c62
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h4
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c33
-rw-r--r--drivers/misc/cb710/core.c8
-rw-r--r--drivers/misc/cs5535-mfgpt.c1
-rw-r--r--drivers/misc/cxl/Kconfig26
-rw-r--r--drivers/misc/cxl/Makefile14
-rw-r--r--drivers/misc/cxl/api.c532
-rw-r--r--drivers/misc/cxl/base.c126
-rw-r--r--drivers/misc/cxl/context.c362
-rw-r--r--drivers/misc/cxl/cxl.h1135
-rw-r--r--drivers/misc/cxl/cxllib.c271
-rw-r--r--drivers/misc/cxl/debugfs.c134
-rw-r--r--drivers/misc/cxl/fault.c341
-rw-r--r--drivers/misc/cxl/file.c699
-rw-r--r--drivers/misc/cxl/flash.c538
-rw-r--r--drivers/misc/cxl/guest.c1208
-rw-r--r--drivers/misc/cxl/hcalls.c643
-rw-r--r--drivers/misc/cxl/hcalls.h200
-rw-r--r--drivers/misc/cxl/irq.c450
-rw-r--r--drivers/misc/cxl/main.c383
-rw-r--r--drivers/misc/cxl/native.c1592
-rw-r--r--drivers/misc/cxl/of.c505
-rw-r--r--drivers/misc/cxl/pci.c2107
-rw-r--r--drivers/misc/cxl/sysfs.c771
-rw-r--r--drivers/misc/cxl/trace.c9
-rw-r--r--drivers/misc/cxl/trace.h691
-rw-r--r--drivers/misc/cxl/vphb.c309
-rw-r--r--drivers/misc/ds1682.c43
-rw-r--r--drivers/misc/dw-xdata-pcie.c5
-rw-r--r--drivers/misc/echo/Kconfig9
-rw-r--r--drivers/misc/echo/Makefile2
-rw-r--r--drivers/misc/echo/echo.c589
-rw-r--r--drivers/misc/echo/echo.h175
-rw-r--r--drivers/misc/echo/fir.h154
-rw-r--r--drivers/misc/echo/oslec.h81
-rw-r--r--drivers/misc/eeprom/Kconfig25
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c22
-rw-r--r--drivers/misc/eeprom/at25.c379
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c48
-rw-r--r--drivers/misc/eeprom/ee1004.c174
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c15
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c182
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c185
-rw-r--r--drivers/misc/eeprom/m24lr.c606
-rw-r--r--drivers/misc/eeprom/max6875.c4
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/fastrpc.c290
-rw-r--r--drivers/misc/genwqe/card_ddcb.c2
-rw-r--r--drivers/misc/hi6421v600-irq.c5
-rw-r--r--drivers/misc/hisi_hikey_usb.c5
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c38
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/isl29003.c2
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/misc/keba/Kconfig26
-rw-r--r--drivers/misc/keba/Makefile4
-rw-r--r--drivers/misc/keba/cp500.c989
-rw-r--r--drivers/misc/keba/lan9252.c359
-rw-r--r--drivers/misc/kgdbts.c4
-rw-r--r--drivers/misc/lan966x_pci.c215
-rw-r--r--drivers/misc/lan966x_pci.dtso177
-rw-r--r--drivers/misc/lattice-ecp3-config.c2
-rw-r--r--drivers/misc/lis3lv02d/Kconfig8
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c37
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h4
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/bugs.c32
-rw-r--r--drivers/misc/lkdtm/cfi.c2
-rw-r--r--drivers/misc/lkdtm/fortify.c6
-rw-r--r--drivers/misc/lkdtm/heap.c17
-rw-r--r--drivers/misc/lkdtm/kstack_erase.c150
-rw-r--r--drivers/misc/lkdtm/perms.c19
-rw-r--r--drivers/misc/lkdtm/refcount.c16
-rw-r--r--drivers/misc/lkdtm/stackleak.c150
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c9
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c108
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c6
-rw-r--r--drivers/misc/mei/Kconfig17
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/bus-fixup.c24
-rw-r--r--drivers/misc/mei/bus.c134
-rw-r--r--drivers/misc/mei/client.c104
-rw-r--r--drivers/misc/mei/client.h8
-rw-r--r--drivers/misc/mei/dma-ring.c8
-rw-r--r--drivers/misc/mei/gsc-me.c20
-rw-r--r--drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c4
-rw-r--r--drivers/misc/mei/hbm.c135
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c4
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/hw-me.c153
-rw-r--r--drivers/misc/mei/hw-txe.c105
-rw-r--r--drivers/misc/mei/hw-txe.h2
-rw-r--r--drivers/misc/mei/hw.h4
-rw-r--r--drivers/misc/mei/init.c66
-rw-r--r--drivers/misc/mei/interrupt.c47
-rw-r--r--drivers/misc/mei/main.c196
-rw-r--r--drivers/misc/mei/mei-trace.h6
-rw-r--r--drivers/misc/mei/mei_dev.h24
-rw-r--r--drivers/misc/mei/mei_lb.c311
-rw-r--r--drivers/misc/mei/pci-me.c32
-rw-r--r--drivers/misc/mei/pci-txe.c21
-rw-r--r--drivers/misc/mei/platform-vsc.c88
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c4
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c34
-rw-r--r--drivers/misc/mei/vsc-tp.c130
-rw-r--r--drivers/misc/mei/vsc-tp.h3
-rw-r--r--drivers/misc/mrvl_cn10k_dpi.c676
-rw-r--r--drivers/misc/nsm.c1
-rw-r--r--drivers/misc/ntsync.c1209
-rw-r--r--drivers/misc/ocxl/afu_irq.c2
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/ocxl/sysfs.c16
-rw-r--r--drivers/misc/open-dice.c3
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c590
-rw-r--r--drivers/misc/phantom.c1
-rw-r--r--drivers/misc/pvpanic/pvpanic.c43
-rw-r--r--drivers/misc/rp1/Kconfig20
-rw-r--r--drivers/misc/rp1/Makefile3
-rw-r--r--drivers/misc/rp1/rp1-pci.dtso25
-rw-r--r--drivers/misc/rp1/rp1_pci.c336
-rw-r--r--drivers/misc/rpmb-core.c226
-rw-r--r--drivers/misc/sgi-gru/grukservices.c2
-rw-r--r--drivers/misc/sgi-gru/grumain.c4
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c9
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c2
-rw-r--r--drivers/misc/sram.c16
-rw-r--r--drivers/misc/ti-st/Kconfig19
-rw-r--r--drivers/misc/ti-st/Makefile7
-rw-r--r--drivers/misc/ti-st/st_core.c918
-rw-r--r--drivers/misc/ti-st/st_kim.c839
-rw-r--r--drivers/misc/ti-st/st_ll.c156
-rw-r--r--drivers/misc/ti_fpc202.c435
-rw-r--r--drivers/misc/tifm_7xx1.c6
-rw-r--r--drivers/misc/tifm_core.c8
-rw-r--r--drivers/misc/tps6594-esm.c2
-rw-r--r--drivers/misc/tps6594-pfsm.c66
-rw-r--r--drivers/misc/tsl2550.c10
-rw-r--r--drivers/misc/uacce/uacce.c40
-rw-r--r--drivers/misc/vcpu_stall_detector.c36
-rw-r--r--drivers/misc/vmw_balloon.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c56
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.h4
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c53
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c13
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c133
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c3
-rw-r--r--drivers/misc/xilinx_sdfec.c2
-rw-r--r--drivers/misc/xilinx_tmr_inject.c3
-rw-r--r--drivers/mmc/core/Kconfig1
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/block.c363
-rw-r--r--drivers/mmc/core/bus.c56
-rw-r--r--drivers/mmc/core/bus.h2
-rw-r--r--drivers/mmc/core/card.h25
-rw-r--r--drivers/mmc/core/core.c165
-rw-r--r--drivers/mmc/core/core.h33
-rw-r--r--drivers/mmc/core/debugfs.c10
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c289
-rw-r--r--drivers/mmc/core/mmc_ops.c102
-rw-r--r--drivers/mmc/core/mmc_ops.h17
-rw-r--r--drivers/mmc/core/mmc_test.c65
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c3
-rw-r--r--drivers/mmc/core/pwrseq_sd8787.c3
-rw-r--r--drivers/mmc/core/pwrseq_simple.c52
-rw-r--r--drivers/mmc/core/queue.c33
-rw-r--r--drivers/mmc/core/queue.h3
-rw-r--r--drivers/mmc/core/quirks.h53
-rw-r--r--drivers/mmc/core/regulator.c119
-rw-r--r--drivers/mmc/core/sd.c257
-rw-r--r--drivers/mmc/core/sd.h4
-rw-r--r--drivers/mmc/core/sd_ops.c27
-rw-r--r--drivers/mmc/core/sd_ops.h3
-rw-r--r--drivers/mmc/core/sd_uhs2.c1304
-rw-r--r--drivers/mmc/core/sdio.c16
-rw-r--r--drivers/mmc/core/sdio_bus.c15
-rw-r--r--drivers/mmc/core/sdio_uart.c3
-rw-r--r--drivers/mmc/core/slot-gpio.c20
-rw-r--r--drivers/mmc/host/Kconfig71
-rw-r--r--drivers/mmc/host/Makefile4
-rw-r--r--drivers/mmc/host/alcor.c33
-rw-r--r--drivers/mmc/host/atmel-mci.c89
-rw-r--r--drivers/mmc/host/au1xmmc.c73
-rw-r--r--drivers/mmc/host/bcm2835.c69
-rw-r--r--drivers/mmc/host/cavium-octeon.c4
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/cavium.c10
-rw-r--r--drivers/mmc/host/cb710-mmc.c43
-rw-r--r--drivers/mmc/host/cb710-mmc.h3
-rw-r--r--drivers/mmc/host/cqhci-core.c14
-rw-r--r--drivers/mmc/host/cqhci-crypto.c46
-rw-r--r--drivers/mmc/host/cqhci.h9
-rw-r--r--drivers/mmc/host/davinci_mmc.c83
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c20
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c56
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c2
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798mv200.c10
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c11
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c9
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c2
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c235
-rw-r--r--drivers/mmc/host/dw_mmc-starfive.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c195
-rw-r--r--drivers/mmc/host/dw_mmc.h41
-rw-r--r--drivers/mmc/host/jz4740_mmc.c49
-rw-r--r--drivers/mmc/host/litex_mmc.c14
-rw-r--r--drivers/mmc/host/loongson2-mmc.c1030
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c15
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c348
-rw-r--r--drivers/mmc/host/mmc_spi.c24
-rw-r--r--drivers/mmc/host/mmci.c41
-rw-r--r--drivers/mmc/host/mmci.h2
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c3
-rw-r--r--drivers/mmc/host/moxart-mmc.c120
-rw-r--r--drivers/mmc/host/mtk-sd.c579
-rw-r--r--drivers/mmc/host/mvsdio.c107
-rw-r--r--drivers/mmc/host/mxcmmc.c47
-rw-r--r--drivers/mmc/host/mxs-mmc.c39
-rw-r--r--drivers/mmc/host/of_mmc_spi.c1
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/omap_hsmmc.c39
-rw-r--r--drivers/mmc/host/owl-mmc.c39
-rw-r--r--drivers/mmc/host/pxamci.c90
-rw-r--r--drivers/mmc/host/renesas_sdhi.h9
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c251
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c50
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c19
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c11
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c89
-rw-r--r--drivers/mmc/host/sdhci-acpi.c51
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c6
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c231
-rw-r--r--drivers/mmc/host/sdhci-cadence.c106
-rw-r--r--drivers/mmc/host/sdhci-dove.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c372
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c27
-rw-r--r--drivers/mmc/host/sdhci-iproc.c20
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c21
-rw-r--r--drivers/mmc/host/sdhci-msm.c249
-rw-r--r--drivers/mmc/host/sdhci-npcm.c17
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c89
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c15
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c52
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c1245
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c21
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c2
-rw-r--r--drivers/mmc/host/sdhci-of-k1.c308
-rw-r--r--drivers/mmc/host/sdhci-of-ma35d1.c307
-rw-r--r--drivers/mmc/host/sdhci-of-sparx5.c26
-rw-r--r--drivers/mmc/host/sdhci-omap.c49
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c176
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c574
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c41
-rw-r--r--drivers/mmc/host/sdhci-pci.h4
-rw-r--r--drivers/mmc/host/sdhci-pic32.c11
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c16
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c30
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c72
-rw-r--r--drivers/mmc/host/sdhci-s3c.c39
-rw-r--r--drivers/mmc/host/sdhci-spear.c19
-rw-r--r--drivers/mmc/host/sdhci-sprd.c46
-rw-r--r--drivers/mmc/host/sdhci-st.c14
-rw-r--r--drivers/mmc/host/sdhci-tegra.c25
-rw-r--r--drivers/mmc/host/sdhci-uhs2.c1251
-rw-r--r--drivers/mmc/host/sdhci-uhs2.h188
-rw-r--r--drivers/mmc/host/sdhci-xenon.c39
-rw-r--r--drivers/mmc/host/sdhci.c381
-rw-r--r--drivers/mmc/host/sdhci.h108
-rw-r--r--drivers/mmc/host/sdhci_am654.c160
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c15
-rw-r--r--drivers/mmc/host/sdricoh_cs.c10
-rw-r--r--drivers/mmc/host/sh_mmcif.c37
-rw-r--r--drivers/mmc/host/sunplus-mmc.c4
-rw-r--r--drivers/mmc/host/sunxi-mmc.c41
-rw-r--r--drivers/mmc/host/tifm_sd.c30
-rw-r--r--drivers/mmc/host/tmio_mmc.h35
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c87
-rw-r--r--drivers/mmc/host/toshsd.c12
-rw-r--r--drivers/mmc/host/uniphier-sd.c24
-rw-r--r--drivers/mmc/host/usdhi6rol0.c36
-rw-r--r--drivers/mmc/host/ushc.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c48
-rw-r--r--drivers/mmc/host/vub300.c30
-rw-r--r--drivers/mmc/host/wbsd.c84
-rw-r--r--drivers/mmc/host/wbsd.h10
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c26
-rw-r--r--drivers/most/core.c14
-rw-r--r--drivers/most/most_cdev.c6
-rw-r--r--drivers/most/most_usb.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/chips/cfi_probe.c2
-rw-r--r--drivers/mtd/chips/cfi_util.c1
-rw-r--r--drivers/mtd/chips/jedec_probe.c4
-rw-r--r--drivers/mtd/devices/Kconfig13
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/docg3.h2
-rw-r--r--drivers/mtd/devices/mchp48l640.c37
-rw-r--r--drivers/mtd/devices/mtd_intel_dg.c880
-rw-r--r--drivers/mtd/devices/phram.c15
-rw-r--r--drivers/mtd/devices/powernv_flash.c5
-rw-r--r--drivers/mtd/devices/slram.c2
-rw-r--r--drivers/mtd/devices/spear_smi.c2
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c8
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c24
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c9
-rw-r--r--drivers/mtd/inftlcore.c9
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c18
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c4
-rw-r--r--drivers/mtd/maps/Makefile11
-rw-r--r--drivers/mtd/maps/lantiq-flash.c2
-rw-r--r--drivers/mtd/maps/map_funcs.c1
-rw-r--r--drivers/mtd/maps/pcmciamtd.c1
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c20
-rw-r--r--drivers/mtd/mtdchar.c6
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/mtdcore.c75
-rw-r--r--drivers/mtd/mtdoops.c11
-rw-r--r--drivers/mtd/mtdpart.c10
-rw-r--r--drivers/mtd/mtdpstore.c12
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/core.c131
-rw-r--r--drivers/mtd/nand/ecc-mxic.c24
-rw-r--r--drivers/mtd/nand/ecc-realtek.c464
-rw-r--r--drivers/mtd/nand/ecc-sw-bch.c2
-rw-r--r--drivers/mtd/nand/ecc-sw-hamming.c2
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/generic.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c1
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c3
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c4
-rw-r--r--drivers/mtd/nand/qpic_common.c779
-rw-r--r--drivers/mtd/nand/raw/Kconfig56
-rw-r--r--drivers/mtd/nand/raw/Makefile4
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c2
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c18
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c42
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c19
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.h2
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c5
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcma_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c319
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/iproc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c331
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c8
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c265
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c31
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c10
-rw-r--r--drivers/mtd/nand/raw/gpio.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c105
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c10
-rw-r--r--drivers/mtd/nand/raw/loongson-nand-controller.c1024
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c28
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c30
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c29
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c98
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c38
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c702
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c214
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c4
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/raw/nandsim.c9
-rw-r--r--drivers/mtd/nand/raw/ndfc.c2
-rw-r--r--drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c1029
-rw-r--r--drivers/mtd/nand/raw/omap2.c45
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c2
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c2
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c12
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c1811
-rw-r--r--drivers/mtd/nand/raw/r852.c7
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c25
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c29
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1230
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c2
-rw-r--r--drivers/mtd/nand/raw/sm_common.c4
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c2
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c56
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c418
-rw-r--r--drivers/mtd/nand/raw/technologic-nand-controller.c222
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c2
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c2
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c2
-rw-r--r--drivers/mtd/nand/spi/Makefile5
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c20
-rw-r--r--drivers/mtd/nand/spi/ato.c14
-rw-r--r--drivers/mtd/nand/spi/core.c456
-rw-r--r--drivers/mtd/nand/spi/esmt.c132
-rw-r--r--drivers/mtd/nand/spi/fmsh.c146
-rw-r--r--drivers/mtd/nand/spi/foresee.c26
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c183
-rw-r--r--drivers/mtd/nand/spi/macronix.c269
-rw-r--r--drivers/mtd/nand/spi/micron.c169
-rw-r--r--drivers/mtd/nand/spi/otp.c362
-rw-r--r--drivers/mtd/nand/spi/paragon.c20
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c147
-rw-r--r--drivers/mtd/nand/spi/toshiba.c22
-rw-r--r--drivers/mtd/nand/spi/winbond.c384
-rw-r--r--drivers/mtd/nand/spi/xtx.c20
-rw-r--r--drivers/mtd/nftlcore.c43
-rw-r--r--drivers/mtd/parsers/bcm47xxpart.c2
-rw-r--r--drivers/mtd/parsers/brcm_u-boot.c1
-rw-r--r--drivers/mtd/parsers/cmdlinepart.c18
-rw-r--r--drivers/mtd/parsers/ofpart_core.c4
-rw-r--r--drivers/mtd/parsers/tplink_safeloader.c1
-rw-r--r--drivers/mtd/rfd_ftl.c4
-rw-r--r--drivers/mtd/sm_ftl.c11
-rw-r--r--drivers/mtd/spi-nor/Makefile1
-rw-r--r--drivers/mtd/spi-nor/atmel.c4
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c2
-rw-r--r--drivers/mtd/spi-nor/core.c443
-rw-r--r--drivers/mtd/spi-nor/core.h25
-rw-r--r--drivers/mtd/spi-nor/everspin.c19
-rw-r--r--drivers/mtd/spi-nor/macronix.c206
-rw-r--r--drivers/mtd/spi-nor/micron-st.c111
-rw-r--r--drivers/mtd/spi-nor/otp.c1
-rw-r--r--drivers/mtd/spi-nor/sfdp.c34
-rw-r--r--drivers/mtd/spi-nor/sfdp.h1
-rw-r--r--drivers/mtd/spi-nor/spansion.c88
-rw-r--r--drivers/mtd/spi-nor/sst.c39
-rw-r--r--drivers/mtd/spi-nor/swp.c20
-rw-r--r--drivers/mtd/spi-nor/sysfs.c8
-rw-r--r--drivers/mtd/spi-nor/winbond.c141
-rw-r--r--drivers/mtd/spi-nor/xilinx.c169
-rw-r--r--drivers/mtd/tests/Makefile34
-rw-r--r--drivers/mtd/tests/mtd_test.c9
-rw-r--r--drivers/mtd/tests/oobtest.c2
-rw-r--r--drivers/mtd/tests/pagetest.c2
-rw-r--r--drivers/mtd/tests/subpagetest.c2
-rw-r--r--drivers/mtd/ubi/attach.c16
-rw-r--r--drivers/mtd/ubi/block.c17
-rw-r--r--drivers/mtd/ubi/build.c9
-rw-r--r--drivers/mtd/ubi/cdev.c72
-rw-r--r--drivers/mtd/ubi/debug.c5
-rw-r--r--drivers/mtd/ubi/eba.c3
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c27
-rw-r--r--drivers/mtd/ubi/io.c10
-rw-r--r--drivers/mtd/ubi/kapi.c27
-rw-r--r--drivers/mtd/ubi/nvmem.c8
-rw-r--r--drivers/mtd/ubi/ubi.h17
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/wl.c11
-rw-r--r--drivers/mtd/ubi/wl.h3
-rw-r--r--drivers/mux/Kconfig1
-rw-r--r--drivers/mux/adg792a.c2
-rw-r--r--drivers/mux/adgs1408.c4
-rw-r--r--drivers/mux/core.c9
-rw-r--r--drivers/mux/gpio.c9
-rw-r--r--drivers/mux/mmio.c97
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/amt.c44
-rw-r--r--drivers/net/arcnet/arcnet.c4
-rw-r--r--drivers/net/arcnet/com20020-isa.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c17
-rw-r--r--drivers/net/bareudp.c86
-rw-r--r--drivers/net/bonding/bond_3ad.c131
-rw-r--r--drivers/net/bonding/bond_alb.c8
-rw-r--r--drivers/net/bonding/bond_debugfs.c9
-rw-r--r--drivers/net/bonding/bond_main.c794
-rw-r--r--drivers/net/bonding/bond_netlink.c68
-rw-r--r--drivers/net/bonding/bond_options.c222
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/caif/caif_serial.c16
-rw-r--r--drivers/net/caif/caif_virtio.c11
-rw-r--r--drivers/net/can/Kconfig26
-rw-r--r--drivers/net/can/Makefile4
-rw-r--r--drivers/net/can/at91_can.c3
-rw-r--r--drivers/net/can/bxcan.c7
-rw-r--r--drivers/net/can/c_can/c_can_main.c34
-rw-r--r--drivers/net/can/c_can/c_can_platform.c58
-rw-r--r--drivers/net/can/can327.c1
-rw-r--r--drivers/net/can/cc770/Kconfig2
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/cc770/cc770_isa.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c34
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c30
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c2
-rw-r--r--drivers/net/can/dev/bittiming.c63
-rw-r--r--drivers/net/can/dev/calc_bittiming.c124
-rw-r--r--drivers/net/can/dev/dev.c188
-rw-r--r--drivers/net/can/dev/netlink.c903
-rw-r--r--drivers/net/can/dummy_can.c285
-rw-r--r--drivers/net/can/esd/esd_402_pci-core.c9
-rw-r--r--drivers/net/can/esd/esdacc.c57
-rw-r--r--drivers/net/can/esd/esdacc.h38
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c137
-rw-r--r--drivers/net/can/flexcan/flexcan.h8
-rw-r--r--drivers/net/can/grcan.c22
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c71
-rw-r--r--drivers/net/can/janz-ican3.c5
-rw-r--r--drivers/net/can/kvaser_pciefd.c1869
-rw-r--r--drivers/net/can/kvaser_pciefd/Makefile3
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd.h96
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c1908
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c60
-rw-r--r--drivers/net/can/m_can/m_can.c720
-rw-r--r--drivers/net/can/m_can/m_can.h9
-rw-r--r--drivers/net/can/m_can/m_can_pci.c7
-rw-r--r--drivers/net/can/m_can/m_can_platform.c14
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c119
-rw-r--r--drivers/net/can/m_can/tcan4x5x.h2
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c7
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c51
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c312
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c959
-rw-r--r--drivers/net/can/rockchip/Kconfig10
-rw-r--r--drivers/net/can/rockchip/Makefile10
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c962
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-ethtool.c73
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-rx.c299
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c105
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-tx.c167
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd.h553
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/peak_pci.c6
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c12
-rw-r--r--drivers/net/can/sja1000/plx_pci.c3
-rw-r--r--drivers/net/can/sja1000/sja1000.c72
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c17
-rw-r--r--drivers/net/can/slcan/slcan-core.c27
-rw-r--r--drivers/net/can/softing/softing_main.c3
-rw-r--r--drivers/net/can/spi/hi311x.c96
-rw-r--r--drivers/net/can/spi/mcp251x.c81
-rw-r--r--drivers/net/can/spi/mcp251xfd/Kconfig1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c426
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c11
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c118
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c53
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c165
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c160
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c29
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c57
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h69
-rw-r--r--drivers/net/can/sun4i_can.c30
-rw-r--r--drivers/net/can/ti_hecc.c5
-rw-r--r--drivers/net/can/usb/Kconfig18
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c59
-rw-r--r--drivers/net/can/usb/esd_usb.c77
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.c2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c11
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c8
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c10
-rw-r--r--drivers/net/can/usb/f81604.c13
-rw-r--r--drivers/net/can/usb/gs_usb.c189
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h61
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c175
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c87
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c239
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c229
-rw-r--r--drivers/net/can/usb/mcba_usb.c2
-rw-r--r--drivers/net/can/usb/nct6694_canfd.c831
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c59
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h4
-rw-r--r--drivers/net/can/usb/ucan.c44
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c29
-rw-r--r--drivers/net/can/xilinx_can.c39
-rw-r--r--drivers/net/dsa/Kconfig29
-rw-r--r--drivers/net/dsa/Makefile7
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c927
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c8
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c136
-rw-r--r--drivers/net/dsa/b53/b53_priv.h164
-rw-r--r--drivers/net/dsa/b53/b53_regs.h92
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c5
-rw-r--r--drivers/net/dsa/b53/b53_spi.c2
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c23
-rw-r--r--drivers/net/dsa/bcm_sf2.h5
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c22
-rw-r--r--drivers/net/dsa/dsa_loop.c87
-rw-r--r--drivers/net/dsa/dsa_loop.h20
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c36
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c32
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h10
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c26
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h7
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c14
-rw-r--r--drivers/net/dsa/ks8995.c857
-rw-r--r--drivers/net/dsa/lan9303-core.c52
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c8
-rw-r--r--drivers/net/dsa/lantiq/Kconfig24
-rw-r--r--drivers/net/dsa/lantiq/Makefile3
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c518
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h301
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip_common.c1739
-rw-r--r--drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h)9
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.c733
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.h126
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h154
-rw-r--r--drivers/net/dsa/lantiq_gswip.c2283
-rw-r--r--drivers/net/dsa/microchip/Kconfig10
-rw-r--r--drivers/net/dsa/microchip/Makefile2
-rw-r--r--drivers/net/dsa/microchip/ksz8.c2115
-rw-r--r--drivers/net/dsa/microchip/ksz8.h7
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c1874
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h798
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c4
-rw-r--r--drivers/net/dsa/microchip/ksz8_reg.h850
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c615
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h11
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c22
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h28
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c1441
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h208
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c243
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c61
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h9
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c138
-rw-r--r--drivers/net/dsa/microchip/lan937x.h2
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c321
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h18
-rw-r--r--drivers/net/dsa/mt7530-mdio.c21
-rw-r--r--drivers/net/dsa/mt7530-mmio.c25
-rw-r--r--drivers/net/dsa/mt7530.c832
-rw-r--r--drivers/net/dsa/mt7530.h94
-rw-r--r--drivers/net/dsa/mv88e6060.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig10
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c344
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h35
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c43
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_scratch.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c28
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h21
-rw-r--r--drivers/net/dsa/mv88e6xxx/leds.c848
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6352.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-639x.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c9
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h133
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c195
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h133
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c14
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h8
-rw-r--r--drivers/net/dsa/mv88e6xxx/trace.h4
-rw-r--r--drivers/net/dsa/ocelot/felix.c339
-rw-r--r--drivers/net/dsa/ocelot/felix.h12
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c150
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c58
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c62
-rw-r--r--drivers/net/dsa/qca/ar9331.c6
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c15
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 125
-rw-r--r--  drivers/net/dsa/qca/qca8k-leds.c | 12
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 4
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 6
-rw-r--r--  drivers/net/dsa/realtek/Makefile | 3
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 8
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 8
-rw-r--r--  drivers/net/dsa/realtek/realtek.h | 3
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c | 12
-rw-r--r--  drivers/net/dsa/realtek/rtl8366-core.c | 22
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb-leds.c | 177
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c | 281
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.h | 107
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c | 24
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.c | 45
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ethtool.c | 10
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 111
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c | 34
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 68
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 9
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 14
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.c | 8
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 1284
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-platform.c | 2
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx.h | 39
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 17
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 4
-rw-r--r--  drivers/net/dsa/yt921x.c | 3006
-rw-r--r--  drivers/net/dsa/yt921x.h | 567
-rw-r--r--  drivers/net/dummy.c | 22
-rw-r--r--  drivers/net/eql.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 8
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 6
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 2
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 4
-rw-r--r--  drivers/net/ethernet/8390/mcf8390.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 11
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 4
-rw-r--r--  drivers/net/ethernet/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/Makefile | 5
-rw-r--r--  drivers/net/ethernet/actions/owl-emac.c | 9
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 8
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 8
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 5
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 41
-rw-r--r--  drivers/net/ethernet/airoha/Kconfig | 34
-rw-r--r--  drivers/net/ethernet/airoha/Makefile | 9
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.c | 3180
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.h | 673
-rw-r--r--  drivers/net/ethernet/airoha/airoha_npu.c | 783
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe.c | 1561
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe_debugfs.c | 187
-rw-r--r--  drivers/net/ethernet/airoha/airoha_regs.h | 923
-rw-r--r--  drivers/net/ethernet/alacritech/slicoss.c | 21
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 26
-rw-r--r--  drivers/net/ethernet/alteon/acenic.h | 8
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 49
-rw-r--r--  drivers/net/ethernet/amazon/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/Makefile | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 148
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 483
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 176
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_debugfs.c | 62
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_debugfs.h | 27
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_devlink.c | 210
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_devlink.h | 21
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 237
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 185
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 16
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_phc.c | 233
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_phc.h | 37
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 8
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 5
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 7
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.h | 1
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 1
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 4
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/hplance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 8
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 15
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c | 40
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c | 46
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c | 17
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 15
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c | 13
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 16
-rw-r--r--  drivers/net/ethernet/amd/pds_core/main.c | 29
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/Makefile | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 164
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 117
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 136
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 126
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 453
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 497
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 182
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c | 399
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 135
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 119
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 147
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 212
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c | 117
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 165
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 119
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pps.c | 74
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 218
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-selftest.c | 346
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-smn.h | 30
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 226
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.c | 6
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mdio.c | 18
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 16
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 71
-rw-r--r--  drivers/net/ethernet/apple/mace.c | 6
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c | 14
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 100
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 67
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 16
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 153
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 43
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 32
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 39
-rw-r--r--  drivers/net/ethernet/arc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/arc/Makefile | 1
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 88
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 27
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 9
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 223
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 8
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 85
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 184
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 81
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 119
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 45
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 26
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 70
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/Makefile | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge.h | 255
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_auxr.c | 258
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_auxr.h | 84
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_core.c | 420
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_db.h | 34
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_devlink.c | 306
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_devlink.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c | 548
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h | 112
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c | 1185
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_netdev.c | 2485
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_netdev.h | 454
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_resc.c | 617
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_resc.h | 97
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_rmem.c | 499
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_rmem.h | 202
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 127
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 82
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3216
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 266
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 305
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 125
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 828
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 10566
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 346
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 90
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 182
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 121
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 49
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1423
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 84
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 113
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 88
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 305
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 26
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 46
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 31
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 291
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 1283
-rw-r--r--  drivers/net/ethernet/cadence/macb_pci.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 16
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 284
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 64
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 51
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 16
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 14
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 64
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 57
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 57
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 8
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 70
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/common.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/tp.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.c | 37
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 119
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 225
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c | 98
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 42
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 44
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 31
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/srq.c | 58
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/srq.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c | 41
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 24
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 8
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 67
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/cq_desc.h | 25
-rw-r--r--  drivers/net/ethernet/cisco/enic/cq_enet_desc.h | 142
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 114
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 190
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 839
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.c | 129
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.h | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_rq.c | 436
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_rq.h | 8
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_wq.c | 117
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_wq.h | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.h | 45
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_enet.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 2
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 101
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 5
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.c | 1
-rw-r--r--  drivers/net/ethernet/dec/tulip/21142.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 12
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/eeprom.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c | 6
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic2.c | 8
-rw-r--r--  drivers/net/ethernet/dec/tulip/timer.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip.h | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 19
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 8
-rw-r--r--  drivers/net/ethernet/dec/tulip/xircom_cb.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 123
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.h | 24
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 39
-rw-r--r--  drivers/net/ethernet/dnet.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 205
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 56
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 30
-rw-r--r--  drivers/net/ethernet/engleder/tsnep.h | 8
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 99
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ptp.c | 88
-rw-r--r--  drivers/net/ethernet/ethoc.c | 2
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 112
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.h | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r--  drivers/net/ethernet/fealnx.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 184
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 20
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 145
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 90
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 64
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 27
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 57
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile | 13
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 1131
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 168
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c | 90
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h | 20
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_hw.h | 232
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_pf.c | 1102
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 50
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 410
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 98
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 31
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 474
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.h | 35
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf_common.c | 439
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf_common.h | 22
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 36
-rw-r--r--  drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c | 845
-rw-r--r--  drivers/net/ethernet/freescale/enetc/ntmp.c | 457
-rw-r--r--  drivers/net/ethernet/freescale/enetc/ntmp_private.h | 104
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 68
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 601
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 241
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 36
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 108
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 119
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.h | 22
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 446
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 27
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 95
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 59
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 632
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 24
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 95
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_dev.c | 17
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_queue.c | 65
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_queue.h | 1
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth.h | 4
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_main.c | 40
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_trace.h | 6
-rw-r--r--  drivers/net/ethernet/google/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/google/gve/Makefile | 5
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 304
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 565
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 193
-rw-r--r--  drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c | 344
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc_dqo.h | 6
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h | 4
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 329
-rw-r--r--  drivers/net/ethernet/google/gve/gve_flow_rule.c | 298
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 1116
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ptp.c | 166
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 44
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 683
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 98
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 480
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/Makefile | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 294
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c | 168
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c | 349
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c | 194
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c | 500
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c | 399
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h | 63
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c | 145
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 528
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 304
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 302
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h | 84
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 717
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 181
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 67
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 31
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 71
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 71
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1086
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 206
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 202
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 1370
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 346
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 68
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 94
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 36
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/huawei/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/huawei/Makefile | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 10
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 80
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/Makefile | 25
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c | 915
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h | 156
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_common.c | 76
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_common.h | 54
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_csr.h | 79
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c | 776
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h | 122
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c | 236
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h | 57
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c | 426
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h | 47
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h | 264
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c | 557
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h | 81
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c | 436
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h | 90
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_irq.c | 194
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_lld.c | 421
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_lld.h | 21
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_main.c | 409
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c | 860
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h | 141
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c | 21
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h | 15
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h | 224
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c | 496
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c | 385
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h | 61
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h | 93
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c | 885
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h | 145
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h | 9
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c | 68
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h | 54
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_rss.c | 336
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_rss.h | 14
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_rx.c | 551
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_rx.h | 104
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_tx.c | 779
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_tx.h | 147
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_wq.c | 138
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_wq.h | 87
-rw-r--r--  drivers/net/ethernet/i825xx/sni_82596.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 1
-rw-r--r--  drivers/net/ethernet/ibm/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 12
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 260
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h | 10
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 94
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.c | 49
-rw-r--r--  drivers/net/ethernet/ibm/emac/tah.c | 49
-rw-r--r--  drivers/net/ethernet/ibm/emac/zmii.c | 49
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 667
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 86
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 384
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 17
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 27
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 132
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 133
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 15
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 304
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 18
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.h | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 53
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 12
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 122
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 78
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 156
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 1229
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 23
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 172
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devlink.c | 55
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 230
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 885
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 57
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 47
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 38
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 235
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h | 10
-rw-r--r--  drivers/net/ethernet/intel/iavf/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 101
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.c | 62
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.h | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h | 83
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adv_rss.c | 119
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adv_rss.h | 31
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 110
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 249
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.c | 89
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.h | 13
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 929
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_prototype.h | 6
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ptp.c | 492
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ptp.h | 47
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_trace.h | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 450
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 68
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_type.h | 273
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_types.h | 34
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 449
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 414
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.c | 430
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.c | 551
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.h | 71
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.c | 1001
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.h | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 226
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 127
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.h | 33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 572
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 83
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 604
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_cgu_regs.h | 116
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 1719
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 116
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 259
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 450
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 633
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 1768
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.h | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 202
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 864
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.h | 45
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 270
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 425
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 167
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 56
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c | 472
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 42
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hwmon.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 269
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc_int.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_irq.c | 275
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_irq.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 1011
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 102
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 459
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 1260
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 154
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_osdep.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser.c | 2430
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser.h | 538
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser_rt.c | 859
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 63
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 2095
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 172
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 376
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 3284
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 378
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 235
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 205
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sf_eth.c | 329
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sf_eth.h | 33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 247
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 756
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 281
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tspll.c | 626
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tspll.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 982
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 156
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 67
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 136
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 107
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 4196
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 104
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 186
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 2019
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 56
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 90
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 472
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 50
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/allowlist.c | 199
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h) | 0
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/fdir.c | 2434
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/fdir.h | 57
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/queues.c | 975
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/queues.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/rss.c | 1922
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/rss.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/virtchnl.c | 2936
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/virtchnl.h | 140
-rw-r--r--  drivers/net/ethernet/intel/idpf/Kconfig | 27
-rw-r--r--  drivers/net/ethernet/intel/idpf/Makefile | 10
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 289
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.c | 43
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.h | 18
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq_api.h | 5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c | 79
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 674
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_idc.c | 503
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h | 4
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h | 21
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 706
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c | 169
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_mem.h | 8
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.c | 1021
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.h | 379
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 572
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 3218
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 967
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 62
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 1860
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h | 125
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c | 673
-rw-r--r--  drivers/net/ethernet/intel/idpf/virtchnl2.h | 590
-rw-r--r--  drivers/net/ethernet/intel/idpf/xdp.c | 486
-rw-r--r--  drivers/net/ethernet/intel/idpf/xdp.h | 175
-rw-r--r--  drivers/net/ethernet/intel/idpf/xsk.c | 633
-rw-r--r--  drivers/net/ethernet/intel/idpf/xsk.h | 33
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 72
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 57
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 399
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 70
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_xsk.c | 562
-rw-r--r--  drivers/net/ethernet/intel/igbvf/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 31
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 24
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 84
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.h | 9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 94
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_diag.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 260
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c | 318
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 668
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_nvm.c | 54
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_nvm.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 28
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 171
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 28
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c | 414
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.h | 58
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/devlink/devlink.c | 558
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/devlink/devlink.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/devlink/region.c | 290
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c | 4043
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h | 102
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 323
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c | 707
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 72
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1044
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 155
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 143
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h | 1032
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 251
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 78
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 196
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 5
-rw-r--r--  drivers/net/ethernet/intel/libeth/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/intel/libeth/Makefile | 10
-rw-r--r--  drivers/net/ethernet/intel/libeth/priv.h | 37
-rw-r--r--  drivers/net/ethernet/intel/libeth/rx.c | 169
-rw-r--r--  drivers/net/ethernet/intel/libeth/tx.c | 41
-rw-r--r--  drivers/net/ethernet/intel/libeth/xdp.c | 451
-rw-r--r--  drivers/net/ethernet/intel/libeth/xsk.c | 271
-rw-r--r--  drivers/net/ethernet/intel/libie/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/intel/libie/Makefile | 10
-rw-r--r--  drivers/net/ethernet/intel/libie/adminq.c | 52
-rw-r--r--  drivers/net/ethernet/intel/libie/fwlog.c | 1115
-rw-r--r--  drivers/net/ethernet/intel/libie/rx.c | 10
-rw-r--r--  drivers/net/ethernet/jme.c | 10
-rw-r--r--  drivers/net/ethernet/korina.c | 7
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 37
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 2
-rw-r--r--  drivers/net/ethernet/litex/litex_liteeth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 72
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 189
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 417
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 201
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c | 80
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 86
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 91
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c | 70
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 182
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 33
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c | 218
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c | 424
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h | 81
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h | 380
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 123
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 193
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 332
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 189
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 149
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 148
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 728
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 82
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 335
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 231
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 62
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c | 469
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 69
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 100
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c | 1042
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h | 265
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c | 450
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 403
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 223
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 120
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 331
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 38
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 652
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 317
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 124
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c | 245
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h | 24
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/rep.c | 879
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/rep.h | 55
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_counter.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 13
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c | 45
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 788
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 132
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 10
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 9
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 41
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 31
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 76
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.h | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 38
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/alloc.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 125
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 211
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 208
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 121
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 127
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c | 376
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 145
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c365
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c349
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c838
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c1155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c528
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1142
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c686
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c286
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c326
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c615
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c911
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c673
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c262
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c440
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c250
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c237
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c234
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c2651
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h316
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c1226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h347
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c487
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c2329
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h831
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1641
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c1293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h941
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h472
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c846
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c1358
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c494
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c)42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c)38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c)66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h)23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c)259
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c)125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h)10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h)40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h)9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c464
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c217
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c405
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c345
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig38
-rw-r--r--drivers/net/ethernet/meta/Makefile6
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h227
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c149
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h1201
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c271
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c668
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h5
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c1924
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c1920
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h326
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c601
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h163
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c313
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c934
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h122
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c857
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h117
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c655
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c311
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c1246
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h232
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c303
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c560
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h158
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c2930
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h201
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c30
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c6
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c8
-rw-r--r--drivers/net/ethernet/microchip/Kconfig7
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c6
-rw-r--r--drivers/net/ethernet/microchip/fdma/Kconfig18
-rw-r--r--drivers/net/ethernet/microchip/fdma/Makefile7
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.c146
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.h243
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c203
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c777
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h33
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c91
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h11
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Kconfig19
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Makefile6
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c455
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c39
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c409
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c20
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h70
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c68
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c357
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c191
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c3843
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c126
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c65
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c454
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c370
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h268
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h4750
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c39
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c45
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c181
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h28
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c49
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c58
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.h247
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c45
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c50
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c45
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c33
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig3
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c864
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c134
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c48
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c1212
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c354
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c13
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c285
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c54
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c53
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c280
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c39
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c8
-rw-r--r--drivers/net/ethernet/mucse/Kconfig33
-rw-r--r--drivers/net/ethernet/mucse/Makefile7
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/Makefile11
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h71
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c143
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h17
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c320
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c406
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h20
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c191
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h88
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c19
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c44
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c1369
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c44
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c10
-rw-r--r--drivers/net/ethernet/pensando/Kconfig2
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c19
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c408
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h61
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c152
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h390
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c402
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h35
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c63
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c556
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c7
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c58
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c89
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h8
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c35
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig16
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c24
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c60
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h4
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig24
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h11
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1083
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c264
-rw-r--r--drivers/net/ethernet/realtek/rtase/Makefile10
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h362
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c2400
-rw-r--r--drivers/net/ethernet/renesas/Kconfig11
-rw-r--r--drivers/net/ethernet/renesas/Makefile3
-rw-r--r--drivers/net/ethernet/renesas/ravb.h38
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c837
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c12
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c78
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c2203
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h121
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c2295
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c1371
-rw-r--r--drivers/net/ethernet/renesas/rtsn.h464
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c40
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c15
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/ether3.c8
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c140
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c49
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c5
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx.c147
-rw-r--r--drivers/net/ethernet/sfc/efx.h3
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c15
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h7
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c30
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c19
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c310
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h14
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c89
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c20
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h9
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/mae.c17
-rw-r--r--drivers/net/ethernet/sfc/mae.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c197
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h32
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c135
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h8
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13844
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h92
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h3
-rw-r--r--drivers/net/ethernet/sfc/ptp.c11
-rw-r--r--drivers/net/ethernet/sfc/ptp.h8
-rw-r--r--drivers/net/ethernet/sfc/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c83
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h8
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c9
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c10
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c12
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c238
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h12
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h32
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h6
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c72
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.h4
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c11
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c14
-rw-r--r--drivers/net/ethernet/sfc/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c33
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h4
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c12
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c8
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c19
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2162
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig81
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c230
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c198
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c445
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c656
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c396
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1673
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c186
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c288
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c68
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c167
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c402
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c70
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c172
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c244
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h69
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c524
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h61
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c293
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c68
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c305
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c314
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1941
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c511
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c94
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c215
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c375
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c6
-rw-r--r--drivers/net/ethernet/sun/niu.c94
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c10
-rw-r--r--drivers/net/ethernet/sun/sunhme.c10
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c36
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c8
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c5
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c5
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig15
-rw-r--r--drivers/net/ethernet/tehuti/Makefile3
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h2
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c1857
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h266
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c220
-rw-r--r--drivers/net/ethernet/tehuti/tn40_phy.c76
-rw-r--r--drivers/net/ethernet/tehuti/tn40_regs.h245
-rw-r--r--drivers/net/ethernet/ti/Kconfig16
-rw-r--r--drivers/net/ethernet/ti/Makefile34
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c106
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1290
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h62
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c63
-rw-r--r--drivers/net/ethernet/ti/cpsw.c39
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c353
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h63
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c23
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c27
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h9
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c23
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c428
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.h73
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c9
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c937
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c475
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h95
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c33
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c1505
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h196
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c165
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_queues.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c49
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h172
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h36
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.c477
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.h13
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/netcp.h5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c70
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c87
-rw-r--r--drivers/net/ethernet/ti/tlan.c8
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c58
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2555
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c139
-rw-r--r--drivers/net/ethernet/via/via-rhine.c13
-rw-r--r--drivers/net/ethernet/via/via-velocity.c8
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig46
-rw-r--r--drivers/net/ethernet/wangxun/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c381
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h17
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c806
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h15
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c725
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h9
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c419
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h99
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c905
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c929
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h411
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.c599
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h129
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c414
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c292
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h15
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c15
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c119
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c24
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h10
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c266
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c530
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c500
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c648
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h20
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c21
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c201
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c296
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c240
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h311
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c331
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h26
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h172
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c837
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c25
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c69
-rw-r--r--drivers/net/fddi/defza.c12
-rw-r--r--drivers/net/fjes/fjes_ethtool.c64
-rw-r--r--drivers/net/fjes/fjes_main.c11
-rw-r--r--drivers/net/fjes/fjes_trace.h12
-rw-r--r--drivers/net/geneve.c136
-rw-r--r--drivers/net/gtp.c109
-rw-r--r--drivers/net/hamradio/6pack.c127
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/hamradio/baycom_par.c5
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c4
-rw-r--r--drivers/net/hamradio/bpqether.c29
-rw-r--r--drivers/net/hamradio/scc.c44
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/Kconfig2
-rw-r--r--drivers/net/hyperv/hyperv_net.h22
-rw-r--r--drivers/net/hyperv/netvsc.c78
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c198
-rw-r--r--drivers/net/hyperv/netvsc_trace.h8
-rw-r--r--drivers/net/hyperv/rndis_filter.c57
-rw-r--r--drivers/net/ieee802154/Kconfig1
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c84
-rw-r--r--drivers/net/ieee802154/cc2520.c2
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c5
-rw-r--r--drivers/net/ifb.c18
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c19
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c1
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_interrupt.c1
-rw-r--r--drivers/net/ipa/ipa_main.c15
-rw-r--r--drivers/net/ipa/ipa_mem.c21
-rw-r--r--drivers/net/ipa/ipa_modem.c4
-rw-r--r--drivers/net/ipa/ipa_power.c7
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c6
-rw-r--r--drivers/net/ipa/ipa_uc.c2
-rw-r--r--drivers/net/ipvlan/ipvlan.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c34
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c7
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c25
-rw-r--r--drivers/net/ipvlan/ipvtap.c6
-rw-r--r--drivers/net/loopback.c24
-rw-r--r--drivers/net/macsec.c329
-rw-r--r--drivers/net/macvlan.c58
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig15
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c59
-rw-r--r--drivers/net/mctp/mctp-i3c.c24
-rw-r--r--drivers/net/mctp/mctp-serial.c141
-rw-r--r--drivers/net/mctp/mctp-usb.c390
-rw-r--r--drivers/net/mdio.c172
-rw-r--r--drivers/net/mdio/Kconfig52
-rw-r--r--drivers/net/mdio/Makefile2
-rw-r--r--drivers/net/mdio/fwnode_mdio.c47
-rw-r--r--drivers/net/mdio/mdio-airoha.c278
-rw-r--r--drivers/net/mdio/mdio-aspeed.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-iproc.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c14
-rw-r--r--drivers/net/mdio/mdio-gpio.c2
-rw-r--r--drivers/net/mdio/mdio-hisi-femac.c2
-rw-r--r--drivers/net/mdio/mdio-i2c.c90
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c7
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c2
-rw-r--r--drivers/net/mdio/mdio-moxart.c2
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c10
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c2
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c5
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c2
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c5
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c56
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c2
-rw-r--r--drivers/net/mdio/mdio-octeon.c27
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-sun4i.c2
-rw-r--r--drivers/net/mdio/mdio-thunder.c14
-rw-r--r--drivers/net/mdio/mdio-xgene.c2
-rw-r--r--drivers/net/mdio/of_mdio.c15
-rw-r--r--drivers/net/mii.c3
-rw-r--r--drivers/net/net_failover.c4
-rw-r--r--drivers/net/netconsole.c1178
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bpf.c3
-rw-r--r--drivers/net/netdevsim/bus.c29
-rw-r--r--drivers/net/netdevsim/dev.c137
-rw-r--r--drivers/net/netdevsim/ethtool.c58
-rw-r--r--drivers/net/netdevsim/fib.c5
-rw-r--r--drivers/net/netdevsim/health.c6
-rw-r--r--drivers/net/netdevsim/hwstats.c34
-rw-r--r--drivers/net/netdevsim/ipsec.c50
-rw-r--r--drivers/net/netdevsim/macsec.c56
-rw-r--r--drivers/net/netdevsim/netdev.c588
-rw-r--r--drivers/net/netdevsim/netdevsim.h63
-rw-r--r--drivers/net/netdevsim/psp.c252
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c35
-rw-r--r--drivers/net/netkit.c215
-rw-r--r--drivers/net/nlmon.c4
-rw-r--r--drivers/net/ntb_netdev.c6
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c465
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c263
-rw-r--r--drivers/net/ovpn/netlink-gen.h48
-rw-r--r--drivers/net/ovpn/netlink.c1293
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c241
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c619
-rw-r--r--drivers/net/ovpn/tcp.h37
-rw-r--r--drivers/net/ovpn/udp.c448
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pcs/Kconfig17
-rw-r--r--drivers/net/pcs/Makefile3
-rw-r--r--drivers/net/pcs/pcs-lynx.c126
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c26
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c343
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c24
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c457
-rw-r--r--drivers/net/pcs/pcs-xpcs-wx.c56
-rw-r--r--drivers/net/pcs/pcs-xpcs.c1087
-rw-r--r--drivers/net/pcs/pcs-xpcs.h71
-rw-r--r--drivers/net/pfcp.c35
-rw-r--r--drivers/net/phy/Kconfig92
-rw-r--r--drivers/net/phy/Makefile31
-rw-r--r--drivers/net/phy/adin.c8
-rw-r--r--drivers/net/phy/adin1100.c14
-rw-r--r--drivers/net/phy/air_en8811h.c144
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia/Makefile2
-rw-r--r--drivers/net/phy/aquantia/aquantia.h137
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c47
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c32
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c160
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c1020
-rw-r--r--drivers/net/phy/as21xxx.c1088
-rw-r--r--drivers/net/phy/ax88796b.c7
-rw-r--r--drivers/net/phy/ax88796b_rust.rs9
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c118
-rw-r--r--drivers/net/phy/bcm-phy-lib.h4
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c33
-rw-r--r--drivers/net/phy/bcm54140.c3
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/bcm84881.c16
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/broadcom.c605
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/cortina.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c106
-rw-r--r--drivers/net/phy/dp83822.c533
-rw-r--r--drivers/net/phy/dp83848.c4
-rw-r--r--drivers/net/phy/dp83867.c125
-rw-r--r--drivers/net/phy/dp83869.c34
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/dp83td510.c734
-rw-r--r--drivers/net/phy/dp83tg720.c518
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c282
-rw-r--r--drivers/net/phy/icplus.c11
-rw-r--r--drivers/net/phy/intel-xway.c262
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c420
-rw-r--r--drivers/net/phy/marvell-88x2222.c17
-rw-r--r--drivers/net/phy/marvell.c221
-rw-r--r--drivers/net/phy/marvell10g.c47
-rw-r--r--drivers/net/phy/mdio-boardinfo.c80
-rw-r--r--drivers/net/phy/mdio-boardinfo.h23
-rw-r--r--drivers/net/phy/mdio-open-alliance.h49
-rw-r--r--drivers/net/phy/mdio-private.h11
-rw-r--r--drivers/net/phy/mdio_bus.c570
-rw-r--r--drivers/net/phy/mdio_bus_provider.c442
-rw-r--r--drivers/net/phy/mdio_device.c66
-rw-r--r--drivers/net/phy/mediatek-ge-soc.c1555
-rw-r--r--drivers/net/phy/mediatek-ge.c111
-rw-r--r--drivers/net/phy/mediatek/Kconfig40
-rw-r--r--drivers/net/phy/mediatek/Makefile5
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c413
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c1548
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c142
-rw-r--r--drivers/net/phy/mediatek/mtk-phy-lib.c347
-rw-r--r--drivers/net/phy/mediatek/mtk.h104
-rw-r--r--drivers/net/phy/meson-gxl.c2
-rw-r--r--drivers/net/phy/micrel.c1943
-rw-r--r--drivers/net/phy/microchip.c200
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c1306
-rw-r--r--drivers/net/phy/microchip_rds_ptp.h247
-rw-r--r--drivers/net/phy/microchip_t1.c1272
-rw-r--r--drivers/net/phy/microchip_t1s.c430
-rw-r--r--drivers/net/phy/motorcomm.c804
-rw-r--r--drivers/net/phy/mscc/mscc.h31
-rw-r--r--drivers/net/phy/mscc/mscc_main.c528
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c162
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.h1
-rw-r--r--drivers/net/phy/mxl-86110.c978
-rw-r--r--drivers/net/phy/mxl-gpy.c439
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/ncn26000.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c8
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c240
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.h1
-rw-r--r--drivers/net/phy/nxp-cbtx.c4
-rw-r--r--drivers/net/phy/nxp-tja11xx.c65
-rw-r--r--drivers/net/phy/open_alliance_helpers.c77
-rw-r--r--drivers/net/phy/open_alliance_helpers.h47
-rw-r--r--drivers/net/phy/phy-c45.c369
-rw-r--r--drivers/net/phy/phy-caps.h64
-rw-r--r--drivers/net/phy/phy-core.c492
-rw-r--r--drivers/net/phy/phy.c426
-rw-r--r--drivers/net/phy/phy_caps.c380
-rw-r--r--drivers/net/phy/phy_device.c1035
-rw-r--r--drivers/net/phy/phy_led_triggers.c25
-rw-r--r--drivers/net/phy/phy_link_topology.c105
-rw-r--r--drivers/net/phy/phy_package.c419
-rw-r--r--drivers/net/phy/phylib-internal.h31
-rw-r--r--drivers/net/phy/phylib.h34
-rw-r--r--drivers/net/phy/phylink.c1723
-rw-r--r--drivers/net/phy/qcom/Kconfig3
-rw-r--r--drivers/net/phy/qcom/at803x.c207
-rw-r--r--drivers/net/phy/qcom/qca807x.c79
-rw-r--r--drivers/net/phy/qcom/qca808x.c27
-rw-r--r--drivers/net/phy/qcom/qca83xx.c18
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c100
-rw-r--r--drivers/net/phy/qcom/qcom.h28
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/qt2025.rs111
-rw-r--r--drivers/net/phy/realtek.c1364
-rw-r--r--drivers/net/phy/realtek/Kconfig15
-rw-r--r--drivers/net/phy/realtek/Makefile4
-rw-r--r--drivers/net/phy/realtek/realtek.h10
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c74
-rw-r--r--drivers/net/phy/realtek/realtek_main.c2277
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/sfp-bus.c133
-rw-r--r--drivers/net/phy/sfp.c208
-rw-r--r--drivers/net/phy/sfp.h4
-rw-r--r--drivers/net/phy/smsc.c65
-rw-r--r--drivers/net/phy/spi_ks8995.c506
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/teranetics.c5
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c181
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c14
-rw-r--r--drivers/net/plip/plip.c3
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_async.c4
-rw-r--r--drivers/net/ppp/ppp_deflate.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c313
-rw-r--r--drivers/net/ppp/ppp_mppe.c110
-rw-r--r--drivers/net/ppp/ppp_synctty.c7
-rw-r--r--drivers/net/ppp/pppoe.c140
-rw-r--r--drivers/net/ppp/pptp.c27
-rw-r--r--drivers/net/pse-pd/Kconfig12
-rw-r--r--drivers/net/pse-pd/Makefile1
-rw-r--r--drivers/net/pse-pd/pd692x0.c770
-rw-r--r--drivers/net/pse-pd/pse_core.c1301
-rw-r--r--drivers/net/pse-pd/pse_regulator.c23
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/pse-pd/tps23881.c931
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sb1000.c1179
-rw-r--r--drivers/net/slip/slhc.c59
-rw-r--r--drivers/net/slip/slip.c18
-rw-r--r--drivers/net/sungem_phy.c37
-rw-r--r--drivers/net/tap.c198
-rw-r--r--drivers/net/team/team_core.c231
-rw-r--r--drivers/net/team/team_mode_activebackup.c3
-rw-r--r--drivers/net/team/team_mode_loadbalance.c13
-rw-r--r--drivers/net/team/team_nl.c1
-rw-r--r--drivers/net/team/team_nl.h1
-rw-r--r--drivers/net/thunderbolt/main.c21
-rw-r--r--drivers/net/tun.c336
-rw-r--r--drivers/net/tun_vnet.h269
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c76
-rw-r--r--drivers/net/usb/ax88172a.c12
-rw-r--r--drivers/net/usb/ax88179_178a.c24
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_ether.c10
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c75
-rw-r--r--drivers/net/usb/ch9200.c7
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/ipheth.c89
-rw-r--r--drivers/net/usb/lan78xx.c2103
-rw-r--r--drivers/net/usb/net1080.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c27
-rw-r--r--drivers/net/usb/r8152.c153
-rw-r--r--drivers/net/usb/r8153_ecm.c6
-rw-r--r--drivers/net/usb/rtl8150.c38
-rw-r--r--drivers/net/usb/sierra_net.c10
-rw-r--r--drivers/net/usb/smsc75xx.c5
-rw-r--r--drivers/net/usb/smsc95xx.c83
-rw-r--r--drivers/net/usb/sr9700.c17
-rw-r--r--drivers/net/usb/usbnet.c415
-rw-r--r--drivers/net/veth.c112
-rw-r--r--drivers/net/virtio_net.c2249
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h61
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c272
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c94
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h33
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c18
-rw-r--r--drivers/net/vrf.c96
-rw-r--r--drivers/net/vsockmon.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c1033
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c4
-rw-r--r--drivers/net/vxlan/vxlan_private.h21
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c69
-rw-r--r--drivers/net/wan/framer/framer-core.c23
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c37
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c33
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c8
-rw-r--r--drivers/net/wan/ixp4xx_hss.c2
-rw-r--r--drivers/net/wan/lapbether.c4
-rw-r--r--drivers/net/wireguard/Makefile2
-rw-r--r--drivers/net/wireguard/allowedips.c106
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c22
-rw-r--r--drivers/net/wireguard/device.c21
-rw-r--r--drivers/net/wireguard/generated/netlink.c73
-rw-r--r--drivers/net/wireguard/generated/netlink.h30
-rw-r--r--drivers/net/wireguard/netlink.c107
-rw-r--r--drivers/net/wireguard/noise.c36
-rw-r--r--drivers/net/wireguard/peer.h2
-rw-r--r--drivers/net/wireguard/queueing.h17
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c49
-rw-r--r--drivers/net/wireguard/send.c2
-rw-r--r--drivers/net/wireguard/socket.c4
-rw-r--r--drivers/net/wireguard/timers.c25
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c28
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c34
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c111
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c63
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h45
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c89
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c214
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c50
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c37
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c255
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h64
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c69
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h54
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c108
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c577
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h66
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c194
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c58
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h30
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c295
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c37
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c72
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h47
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1035
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c224
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c40
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c84
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c123
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c82
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode_i.h66
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h44
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c101
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h27
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c45
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig19
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c204
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1156
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c109
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c1386
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h701
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.h81
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c1437
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h119
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c6178
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h2076
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c337
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c461
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h241
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2813
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c1510
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h79
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c830
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c206
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h107
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h96
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c144
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h534
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h15
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c611
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h75
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c9539
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h144
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c25
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c20
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c369
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c244
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h59
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c763
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h51
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c625
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h28
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h100
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c395
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c4392
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h1629
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1029
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.h62
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c14
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c63
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c28
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c19
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/key.c2
-rw-r--r--drivers/net/wireless/ath/main.c1
-rw-r--r--drivers/net/wireless/ath/testmode_i.h66
-rw-r--r--drivers/net/wireless/ath/trace.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h74
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c20
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c60
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c64
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c27
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c12
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c26
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c217
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c314
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h42
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c42
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c41
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c447
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h5
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Makefile7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c17
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c29
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h162
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto.c246
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_ccmp.c438
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_tkip.c728
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_wep.c247
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c38
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c102
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_spy.c233
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c43
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c42
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c21
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h305
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c52
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h19
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c398
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c296
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c202
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h176
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c1144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h309
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h322
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h399
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h166
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h187
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h350
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c389
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c256
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h550
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h311
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c379
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c882
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c306
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c837
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1028
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c677
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c2065
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c548
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c703
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c909
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c336
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2702
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c285
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c776
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h615
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c719
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c391
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c521
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c503
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c467
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c739
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c1098
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c357
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c377
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c882
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c224
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c997
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c419
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h436
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c526
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c292
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c343
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c370
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c435
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c448
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c263
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c582
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c618
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2461
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h1172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c2488
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c654
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c4367
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c1434
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c2689
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h868
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h)44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2400
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c548
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c3810
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c1651
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1900
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h191
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c109
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c8
-rw-r--r--drivers/net/wireless/intersil/p54/main.c5
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c18
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c15
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig1
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c16
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c143
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h15
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c105
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/radiotap.h4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c27
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c534
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c112
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h77
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c99
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c80
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c143
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h72
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c182
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c46
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c67
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c202
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c128
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c19
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c104
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c355
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h107
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c426
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h473
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c243
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c152
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c249
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c377
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c336
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c1247
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c1545
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h173
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h104
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c291
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c537
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c261
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c877
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c1343
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c1787
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c1608
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h78
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c318
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h415
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/npu.c352
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/npu.c501
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c26
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c7
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c197
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h13
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c59
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c142
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h15
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c243
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c35
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c481
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h55
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c37
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.h5
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c19
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h4
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c47
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c10
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c49
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c116
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h22
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c153
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.h29
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c13
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c34
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188f.c15
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c82
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c115
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c280
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Kconfig12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c92
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c120
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c63
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c1212
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c3118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c240
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c394
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c1675
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c372
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig64
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile26
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c99
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c362
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c97
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c74
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c74
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c219
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h148
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c80
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c297
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h281
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c156
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c79
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c71
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c1125
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.c2812
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2270
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c1226
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.c2350
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c128
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c116
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c131
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c1989
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.h175
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c129
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h77
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c56
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c20
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c538
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig76
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile29
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1131
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h232
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c548
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h495
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c1528
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h138
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c4793
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h43
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c3331
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h1696
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2843
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c150
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c3448
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h929
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c1631
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h366
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c1366
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c131
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c868
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h265
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c98
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c2730
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h133
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c337
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h326
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c1060
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c273
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c461
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c66
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c226
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c308
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2053
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h122
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c2106
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h400
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c324
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c890
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c4279
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c490
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c108
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c351
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c379
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c457
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c123
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c755
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h28
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c83
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h119
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1071
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h26
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c610
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h71
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c20
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c54
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c33
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c36
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h9
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c11
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c31
-rw-r--r--drivers/net/wireless/st/cw1200/queue.h1
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c18
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h7
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c7
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c3
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c13
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c43
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c186
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h10
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c472
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h26
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c34
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c12
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c15
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c58
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c32
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c15
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c48
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c139
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c52
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c60
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h5
-rw-r--r--drivers/net/wwan/wwan_core.c28
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/net/xen-netback/hash.c5
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/net/xen-netfront.c37
-rw-r--r--drivers/nfc/mei_phy.h4
-rw-r--r--drivers/nfc/microread/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c11
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/uart.c9
-rw-r--r--drivers/nfc/nxp-nci/firmware.c2
-rw-r--r--drivers/nfc/nxp-nci/i2c.c4
-rw-r--r--drivers/nfc/pn533/i2c.c2
-rw-r--r--drivers/nfc/pn533/pn533.c23
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/pn533/usb.c1
-rw-r--r--drivers/nfc/pn544/i2c.c6
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig3
-rw-r--r--drivers/nfc/s3fwrn5/core.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c19
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c4
-rw-r--r--drivers/nfc/s3fwrn5/nci.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.h2
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c4
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.h4
-rw-r--r--drivers/nfc/s3fwrn5/s3fwrn5.h2
-rw-r--r--drivers/nfc/st-nci/i2c.c2
-rw-r--r--drivers/nfc/st-nci/ndlc.c16
-rw-r--r--drivers/nfc/st-nci/se.c17
-rw-r--r--drivers/nfc/st21nfca/core.c4
-rw-r--r--drivers/nfc/st21nfca/dep.c18
-rw-r--r--drivers/nfc/st21nfca/i2c.c3
-rw-r--r--drivers/nfc/st21nfca/se.c13
-rw-r--r--drivers/nfc/trf7970a.c91
-rw-r--r--drivers/nfc/virtual_ncidev.c6
-rw-r--r--drivers/ntb/core.c4
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c19
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c120
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c35
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c3
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c5
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/ntb/ntb_transport.c44
-rw-r--r--drivers/ntb/test/ntb_perf.c6
-rw-r--r--drivers/ntb/test/ntb_pingpong.c3
-rw-r--r--drivers/nvdimm/Kconfig19
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/badrange.c5
-rw-r--r--drivers/nvdimm/btt.c26
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c99
-rw-r--r--drivers/nvdimm/claim.c27
-rw-r--r--drivers/nvdimm/core.c18
-rw-r--r--drivers/nvdimm/dax_devs.c14
-rw-r--r--drivers/nvdimm/dimm.c5
-rw-r--r--drivers/nvdimm/dimm_devs.c48
-rw-r--r--drivers/nvdimm/e820.c4
-rw-r--r--drivers/nvdimm/label.c3
-rw-r--r--drivers/nvdimm/namespace_devs.c156
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvdimm/nd.h12
-rw-r--r--drivers/nvdimm/nd_virtio.c12
-rw-r--r--drivers/nvdimm/of_pmem.c7
-rw-r--r--drivers/nvdimm/pfn_devs.c72
-rw-r--r--drivers/nvdimm/pmem.c29
-rw-r--r--drivers/nvdimm/pmem.h4
-rw-r--r--drivers/nvdimm/ramdax.c282
-rw-r--r--drivers/nvdimm/region.c18
-rw-r--r--drivers/nvdimm/region_devs.c163
-rw-r--r--drivers/nvdimm/security.c14
-rw-r--r--drivers/nvdimm/virtio_pmem.c25
-rw-r--r--drivers/nvme/common/Kconfig1
-rw-r--r--drivers/nvme/common/auth.c402
-rw-r--r--drivers/nvme/common/keyring.c123
-rw-r--r--drivers/nvme/host/Kconfig24
-rw-r--r--drivers/nvme/host/apple.c303
-rw-r--r--drivers/nvme/host/auth.c162
-rw-r--r--drivers/nvme/host/constants.c6
-rw-r--r--drivers/nvme/host/core.c1110
-rw-r--r--drivers/nvme/host/fabrics.c120
-rw-r--r--drivers/nvme/host/fabrics.h18
-rw-r--r--drivers/nvme/host/fault_inject.c3
-rw-r--r--drivers/nvme/host/fc.c197
-rw-r--r--drivers/nvme/host/hwmon.c2
-rw-r--r--drivers/nvme/host/ioctl.c162
-rw-r--r--drivers/nvme/host/multipath.c578
-rw-r--r--drivers/nvme/host/nvme.h184
-rw-r--r--drivers/nvme/host/pci.c1350
-rw-r--r--drivers/nvme/host/pr.c138
-rw-r--r--drivers/nvme/host/rdma.c93
-rw-r--r--drivers/nvme/host/sysfs.c151
-rw-r--r--drivers/nvme/host/tcp.c591
-rw-r--r--drivers/nvme/host/trace.c60
-rw-r--r--drivers/nvme/host/zns.c25
-rw-r--r--drivers/nvme/target/Kconfig24
-rw-r--r--drivers/nvme/target/Makefile5
-rw-r--r--drivers/nvme/target/admin-cmd.c725
-rw-r--r--drivers/nvme/target/auth.c148
-rw-r--r--drivers/nvme/target/configfs.c170
-rw-r--r--drivers/nvme/target/core.c567
-rw-r--r--drivers/nvme/target/debugfs.c229
-rw-r--r--drivers/nvme/target/debugfs.h42
-rw-r--r--drivers/nvme/target/discovery.c33
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c141
-rw-r--r--drivers/nvme/target/fabrics-cmd.c170
-rw-r--r--drivers/nvme/target/fc.c296
-rw-r--r--drivers/nvme/target/fcloop.c515
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c46
-rw-r--r--drivers/nvme/target/loop.c50
-rw-r--r--drivers/nvme/target/nvmet.h257
-rw-r--r--drivers/nvme/target/passthru.c52
-rw-r--r--drivers/nvme/target/pci-epf.c2651
-rw-r--r--drivers/nvme/target/pr.c1155
-rw-r--r--drivers/nvme/target/rdma.c161
-rw-r--r--drivers/nvme/target/tcp.c186
-rw-r--r--drivers/nvme/target/trace.c110
-rw-r--r--drivers/nvme/target/zns.c52
-rw-r--r--drivers/nvmem/Kconfig71
-rw-r--r--drivers/nvmem/Makefile12
-rw-r--r--drivers/nvmem/an8855-efuse.c68
-rw-r--r--drivers/nvmem/apple-efuses.c1
-rw-r--r--drivers/nvmem/apple-spmi-nvmem.c62
-rw-r--r--drivers/nvmem/brcm_nvram.c5
-rw-r--r--drivers/nvmem/core.c258
-rw-r--r--drivers/nvmem/imx-iim.c10
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c93
-rw-r--r--drivers/nvmem/imx-ocotp.c5
-rw-r--r--drivers/nvmem/layouts.c25
-rw-r--r--drivers/nvmem/layouts/Kconfig11
-rw-r--r--drivers/nvmem/layouts/Makefile1
-rw-r--r--drivers/nvmem/layouts/onie-tlv.c1
-rw-r--r--drivers/nvmem/layouts/sl28vpd.c1
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c213
-rw-r--r--drivers/nvmem/layouts/u-boot-env.h15
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c4
-rw-r--r--drivers/nvmem/lpc18xx_otp.c2
-rw-r--r--drivers/nvmem/max77759-nvmem.c145
-rw-r--r--drivers/nvmem/meson-efuse.c19
-rw-r--r--drivers/nvmem/meson-mx-efuse.c6
-rw-r--r--drivers/nvmem/microchip-otpc.c2
-rw-r--r--drivers/nvmem/mtk-efuse.c2
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c1
-rw-r--r--drivers/nvmem/qfprom.c26
-rw-r--r--drivers/nvmem/qnap-mcu-eeprom.c111
-rw-r--r--drivers/nvmem/rcar-efuse.c143
-rw-r--r--drivers/nvmem/rmem.c94
-rw-r--r--drivers/nvmem/rockchip-efuse.c1
-rw-r--r--drivers/nvmem/rockchip-otp.c19
-rw-r--r--drivers/nvmem/s32g-ocotp-nvmem.c100
-rw-r--r--drivers/nvmem/sc27xx-efuse.c1
-rw-r--r--drivers/nvmem/sprd-efuse.c1
-rw-r--r--drivers/nvmem/sunplus-ocotp.c7
-rw-r--r--drivers/nvmem/u-boot-env.c159
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c1
-rw-r--r--drivers/of/.kunitconfig2
-rw-r--r--drivers/of/Kconfig16
-rw-r--r--drivers/of/Makefile3
-rw-r--r--drivers/of/address.c130
-rw-r--r--drivers/of/base.c203
-rw-r--r--drivers/of/cpu.c2
-rw-r--r--drivers/of/device.c72
-rw-r--r--drivers/of/dynamic.c84
-rw-r--r--drivers/of/empty_root.dts9
-rw-r--r--drivers/of/fdt.c205
-rw-r--r--drivers/of/fdt_address.c25
-rw-r--r--drivers/of/irq.c379
-rw-r--r--drivers/of/kexec.c44
-rw-r--r--drivers/of/kobj.c10
-rw-r--r--drivers/of/kunit_overlay_test.dtso9
-rw-r--r--drivers/of/module.c4
-rw-r--r--drivers/of/of_kunit_helpers.c93
-rw-r--r--drivers/of/of_numa.c13
-rw-r--r--drivers/of/of_private.h52
-rw-r--r--drivers/of/of_reserved_mem.c366
-rw-r--r--drivers/of/of_test.c123
-rw-r--r--drivers/of/overlay.c46
-rw-r--r--drivers/of/overlay_test.c120
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/platform.c56
-rw-r--r--drivers/of/property.c248
-rw-r--r--drivers/of/resolver.c65
-rw-r--r--drivers/of/unittest-data/tests-address.dtsi2
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi13
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi28
-rw-r--r--drivers/of/unittest.c330
-rw-r--r--drivers/opp/core.c876
-rw-r--r--drivers/opp/cpu.c38
-rw-r--r--drivers/opp/debugfs.c10
-rw-r--r--drivers/opp/of.c345
-rw-r--r--drivers/opp/opp.h9
-rw-r--r--drivers/opp/ti-opp-supply.c8
-rw-r--r--drivers/parisc/ccio-dma.c54
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/gsc.c4
-rw-r--r--drivers/parisc/iommu-helpers.h10
-rw-r--r--drivers/parisc/led.c6
-rw-r--r--drivers/parisc/pdc_stable.c2
-rw-r--r--drivers/parisc/power.c20
-rw-r--r--drivers/parisc/sba_iommu.c54
-rw-r--r--drivers/parport/daisy.c1
-rw-r--r--drivers/parport/ieee1284.c4
-rw-r--r--drivers/parport/parport_amiga.c10
-rw-r--r--drivers/parport/parport_mfc3.c3
-rw-r--r--drivers/parport/parport_serial.c12
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/parport/procfs.c36
-rw-r--r--drivers/parport/share.c12
-rw-r--r--drivers/pci/Kconfig53
-rw-r--r--drivers/pci/Makefile7
-rw-r--r--drivers/pci/access.c40
-rw-r--r--drivers/pci/ats.c72
-rw-r--r--drivers/pci/bus.c148
-rw-r--r--drivers/pci/controller/Kconfig51
-rw-r--r--drivers/pci/controller/Makefile3
-rw-r--r--drivers/pci/controller/cadence/Kconfig49
-rw-r--r--drivers/pci/controller/cadence/Makefile12
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c267
-rw-r--r--drivers/pci/controller/cadence/pci-sky1.c238
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c96
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-common.c288
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-common.h46
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-hpa.c368
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c388
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h193
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-hpa.c167
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-lga-regs.h230
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c9
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c46
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h506
-rw-r--r--drivers/pci/controller/cadence/pcie-sg2042.c131
-rw-r--r--drivers/pci/controller/dwc/Kconfig124
-rw-r--r--drivers/pci/controller/dwc/Makefile13
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c62
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c123
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c1659
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c334
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c11
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c10
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c17
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c526
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c34
-rw-r--r--drivers/pci/controller/dwc/pcie-bt1.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c927
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c732
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c592
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c417
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h301
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c557
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c14
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c21
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c181
-rw-r--r--drivers/pci/controller/dwc/pcie-nxp-s32g.c406
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c88
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h14
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c144
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c869
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c376
-rw-r--r--drivers/pci/controller/dwc/pcie-sophgo.c257
-rw-r--r--drivers/pci/controller/dwc/pcie-spacemit-k1.c357
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32-ep.c343
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.c370
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.h19
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c180
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c15
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c4
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig1
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c14
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c56
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h5
-rw-r--r--drivers/pci/controller/pci-aardvark.c142
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-host-common.c49
-rw-r--r--drivers/pci/controller/pci-host-common.h23
-rw-r--r--drivers/pci/controller/pci-host-generic.c5
-rw-r--r--drivers/pci/controller/pci-hyperv-intf.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c316
-rw-r--r--drivers/pci/controller/pci-ixp4xx.c6
-rw-r--r--drivers/pci/controller/pci-loongson.c13
-rw-r--r--drivers/pci/controller/pci-mvebu.c56
-rw-r--r--drivers/pci/controller/pci-tegra.c182
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c4
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c5
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c479
-rw-r--r--drivers/pci/controller/pci-xgene.c39
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c55
-rw-r--r--drivers/pci/controller/pcie-altera.c270
-rw-r--r--drivers/pci/controller/pcie-apple.c394
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c967
-rw-r--r--drivers/pci/controller/pcie-hisi-error.c2
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c46
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c2
-rw-r--r--drivers/pci/controller/pcie-iproc.c40
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c474
-rw-r--r--drivers/pci/controller/pcie-mediatek.c191
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c1216
-rw-r--r--drivers/pci/controller/pcie-mt7621.c20
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c14
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c138
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c464
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c77
-rw-r--r--drivers/pci/controller/pcie-rockchip.c240
-rw-r--r--drivers/pci/controller/pcie-rockchip.h118
-rw-r--r--drivers/pci/controller/pcie-rzg3s-host.c1761
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c113
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c117
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c206
-rw-r--r--drivers/pci/controller/pcie-xilinx.c68
-rw-r--r--drivers/pci/controller/plda/Kconfig31
-rw-r--r--drivers/pci/controller/plda/Makefile4
-rw-r--r--drivers/pci/controller/plda/pcie-microchip-host.c834
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c651
-rw-r--r--drivers/pci/controller/plda/pcie-plda.h274
-rw-r--r--drivers/pci/controller/plda/pcie-starfive.c492
-rw-r--r--drivers/pci/controller/vmd.c354
-rw-r--r--drivers/pci/devres.c730
-rw-r--r--drivers/pci/doe.c271
-rw-r--r--drivers/pci/ecam.c2
-rw-r--r--drivers/pci/endpoint/Kconfig10
-rw-r--r--drivers/pci/endpoint/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c56
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c834
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c342
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c11
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c100
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c405
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c9
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c201
-rw-r--r--drivers/pci/host-bridge.c1
-rw-r--r--drivers/pci/hotplug/Kconfig12
-rw-r--r--drivers/pci/hotplug/Makefile1
-rw-r--r--drivers/pci/hotplug/TODO19
-rw-r--r--drivers/pci/hotplug/acpiphp_ampere_altra.c3
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c4
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h3
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c17
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c57
-rw-r--r--drivers/pci/hotplug/cpqphp_sysfs.c1
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c6
-rw-r--r--drivers/pci/hotplug/octep_hp.c427
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c223
-rw-r--r--drivers/pci/hotplug/pciehp.h5
-rw-r--r--drivers/pci/hotplug/pciehp_core.c16
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c5
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c100
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c4
-rw-r--r--drivers/pci/hotplug/pnv_php.c249
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h56
-rw-r--r--drivers/pci/hotplug/shpchp_core.c28
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c79
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c69
-rw-r--r--drivers/pci/ide.c815
-rw-r--r--drivers/pci/iomap.c31
-rw-r--r--drivers/pci/iov.c209
-rw-r--r--drivers/pci/msi/api.c68
-rw-r--r--drivers/pci/msi/irqdomain.c225
-rw-r--r--drivers/pci/msi/msi.c262
-rw-r--r--drivers/pci/msi/msi.h4
-rw-r--r--drivers/pci/npem.c595
-rw-r--r--drivers/pci/of.c288
-rw-r--r--drivers/pci/of_property.c169
-rw-r--r--drivers/pci/p2pdma.c245
-rw-r--r--drivers/pci/pci-acpi.c213
-rw-r--r--drivers/pci/pci-bridge-emul.c4
-rw-r--r--drivers/pci/pci-driver.c56
-rw-r--r--drivers/pci/pci-mid.c4
-rw-r--r--drivers/pci/pci-pf-stub.c1
-rw-r--r--drivers/pci/pci-stub.c1
-rw-r--r--drivers/pci/pci-sysfs.c296
-rw-r--r--drivers/pci/pci.c1165
-rw-r--r--drivers/pci/pci.h556
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/Makefile4
-rw-r--r--drivers/pci/pcie/aer.c615
-rw-r--r--drivers/pci/pcie/aer_inject.c6
-rw-r--r--drivers/pci/pcie/aspm.c372
-rw-r--r--drivers/pci/pcie/bwctrl.c329
-rw-r--r--drivers/pci/pcie/dpc.c169
-rw-r--r--drivers/pci/pcie/edr.c28
-rw-r--r--drivers/pci/pcie/err.c51
-rw-r--r--drivers/pci/pcie/portdrv.c34
-rw-r--r--drivers/pci/pcie/portdrv.h6
-rw-r--r--drivers/pci/pcie/ptm.c325
-rw-r--r--drivers/pci/pcie/tlp.c137
-rw-r--r--drivers/pci/probe.c481
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/pwrctrl/Kconfig48
-rw-r--r--drivers/pci/pwrctrl/Makefile11
-rw-r--r--drivers/pci/pwrctrl/core.c150
-rw-r--r--drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c139
-rw-r--r--drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c648
-rw-r--r--drivers/pci/pwrctrl/slot.c95
-rw-r--r--drivers/pci/quirks.c230
-rw-r--r--drivers/pci/rebar.c328
-rw-r--r--drivers/pci/remove.c55
-rw-r--r--drivers/pci/search.c62
-rw-r--r--drivers/pci/setup-bus.c1516
-rw-r--r--drivers/pci/setup-res.c120
-rw-r--r--drivers/pci/slot.c68
-rw-r--r--drivers/pci/switch/switchtec.c67
-rw-r--r--drivers/pci/tph.c516
-rw-r--r--drivers/pci/tsm.c900
-rw-r--r--drivers/pci/vgaarb.c31
-rw-r--r--drivers/pci/vpd.c16
-rw-r--r--drivers/pcmcia/Kconfig3
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c5
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/cistpl.c6
-rw-r--r--drivers/pcmcia/cs.c17
-rw-r--r--drivers/pcmcia/cs_internal.h1
-rw-r--r--drivers/pcmcia/db1xxx_ss.c2
-rw-r--r--drivers/pcmcia/ds.c4
-rw-r--r--drivers/pcmcia/electra_cf.c4
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.c3
-rw-r--r--drivers/pcmcia/max1600.c1
-rw-r--r--drivers/pcmcia/omap_cf.c14
-rw-r--r--drivers/pcmcia/pd6729.c3
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c168
-rw-r--r--drivers/pcmcia/rsrc_mgr.c1
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c4
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/soc_common.c18
-rw-r--r--drivers/pcmcia/socket_sysfs.c5
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/pcmcia/xxs1500_ss.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c10
-rw-r--r--drivers/peci/controller/peci-aspeed.c17
-rw-r--r--drivers/peci/controller/peci-npcm.c5
-rw-r--r--drivers/peci/core.c15
-rw-r--r--drivers/peci/cpu.c37
-rw-r--r--drivers/peci/device.c9
-rw-r--r--drivers/peci/internal.h17
-rw-r--r--drivers/peci/request.c32
-rw-r--r--drivers/perf/Kconfig48
-rw-r--r--drivers/perf/Makefile7
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c16
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c2
-rw-r--r--drivers/perf/amlogic/meson_g12_ddr_pmu.c2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c292
-rw-r--r--drivers/perf/arm-cci.c14
-rw-r--r--drivers/perf/arm-ccn.c21
-rw-r--r--drivers/perf/arm-cmn.c486
-rw-r--r--drivers/perf/arm-ni.c862
-rw-r--r--drivers/perf/arm_brbe.c805
-rw-r--r--drivers/perf/arm_brbe.h47
-rw-r--r--drivers/perf/arm_cspmu/ampere_cspmu.c33
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c146
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h101
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c278
-rw-r--r--drivers/perf/arm_dmc620_pmu.c11
-rw-r--r--drivers/perf/arm_dsu_pmu.c13
-rw-r--r--drivers/perf/arm_pmu.c88
-rw-r--r--drivers/perf/arm_pmu_acpi.c2
-rw-r--r--drivers/perf/arm_pmu_platform.c22
-rw-r--r--drivers/perf/arm_pmuv3.c364
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c21
-rw-r--r--drivers/perf/arm_spe_pmu.c215
-rw-r--r--drivers/perf/arm_v6_pmu.c431
-rw-r--r--drivers/perf/arm_v7_pmu.c1924
-rw-r--r--drivers/perf/arm_xscale_pmu.c747
-rw-r--r--drivers/perf/cxl_pmu.c37
-rw-r--r--drivers/perf/dwc_pcie_pmu.c303
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c96
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c384
-rw-r--r--drivers/perf/fujitsu_uncore_pmu.c613
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c47
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c44
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c413
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c56
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c589
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_mn_pmu.c411
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_noc_pmu.c443
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c57
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c189
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h61
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c253
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c45
-rw-r--r--drivers/perf/hisilicon/hns3_pmu.c12
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c537
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c68
-rw-r--r--drivers/perf/marvell_pem_pmu.c425
-rw-r--r--drivers/perf/qcom_l2_pmu.c2
-rw-r--r--drivers/perf/qcom_l3_pmu.c11
-rw-r--r--drivers/perf/riscv_pmu.c4
-rw-r--r--drivers/perf/riscv_pmu_legacy.c4
-rw-r--r--drivers/perf/riscv_pmu_sbi.c254
-rw-r--r--drivers/perf/thunderx2_pmu.c7
-rw-r--r--drivers/perf/xgene_pmu.c13
-rw-r--r--drivers/phy/Kconfig31
-rw-r--r--drivers/phy/Makefile5
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c49
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c14
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c11
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c35
-rw-r--r--drivers/phy/broadcom/phy-bcm-cygnus-pcie.c20
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb2.c55
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c1
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c1
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c6
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c24
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c79
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c433
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h1
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c17
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c3
-rw-r--r--drivers/phy/cadence/cdns-dphy.c156
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c299
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1168
-rw-r--r--drivers/phy/freescale/Kconfig14
-rw-r--r--drivers/phy/freescale/Makefile2
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c46
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c349
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-hsio.c614
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c6
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c22
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c740
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-pcie.c11
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c12
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c8
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c2
-rw-r--r--drivers/phy/marvell/Kconfig4
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c6
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c16
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c1
-rw-r--r--drivers/phy/mediatek/Kconfig12
-rw-r--r--drivers/phy/mediatek/Makefile1
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c44
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h3
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c28
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h4
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c137
-rw-r--r--drivers/phy/mediatek/phy-mtk-xfi-tphy.c451
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c112
-rw-r--r--drivers/phy/microchip/Kconfig1
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c195
-rw-r--r--drivers/phy/microchip/sparx5_serdes.h44
-rw-r--r--drivers/phy/microchip/sparx5_serdes_regs.h746
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c6
-rw-r--r--drivers/phy/nuvoton/Kconfig12
-rw-r--r--drivers/phy/nuvoton/Makefile3
-rw-r--r--drivers/phy/nuvoton/phy-ma35d1-usb2.c143
-rw-r--r--drivers/phy/phy-airoha-pcie-regs.h494
-rw-r--r--drivers/phy/phy-airoha-pcie.c1290
-rw-r--r--drivers/phy/phy-can-transceiver.c177
-rw-r--r--drivers/phy/phy-core.c133
-rw-r--r--drivers/phy/phy-lgm-usb.c2
-rw-r--r--drivers/phy/phy-nxp-ptn3222.c123
-rw-r--r--drivers/phy/phy-snps-eusb2.c633
-rw-r--r--drivers/phy/qualcomm/Kconfig38
-rw-r--r--drivers/phy/qualcomm/Makefile3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c383
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c115
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31-eusb2.c326
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c980
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-common.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c1485
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h14
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8_50.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h64
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v7.h67
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h68
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c623
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c137
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usbc.c17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c34
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c442
-rw-r--r--drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c331
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c4
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c4
-rw-r--r--drivers/phy/renesas/Kconfig7
-rw-r--r--drivers/phy/renesas/Makefile1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c10
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c389
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c10
-rw-r--r--drivers/phy/renesas/phy-rzg3e-usb3.c259
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c99
-rw-r--r--drivers/phy/rockchip/Kconfig28
-rw-r--r--drivers/phy/rockchip/Makefile2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c69
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c93
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c285
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c976
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c213
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c1714
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c1472
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c49
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c51
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usbdp.c1666
-rw-r--r--drivers/phy/samsung/Kconfig4
-rw-r--r--drivers/phy/samsung/Makefile2
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c52
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c1820
-rw-r--r--drivers/phy/samsung/phy-exynos5250-usb2.c2
-rw-r--r--drivers/phy/samsung/phy-exynos7-ufs.c1
-rw-r--r--drivers/phy/samsung/phy-exynosautov9-ufs.c1
-rw-r--r--drivers/phy/samsung/phy-exynosautov920-ufs.c168
-rw-r--r--drivers/phy/samsung/phy-fsd-ufs.c1
-rw-r--r--drivers/phy/samsung/phy-gs101-ufs.c210
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c75
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h17
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c1
-rw-r--r--drivers/phy/sophgo/Kconfig19
-rw-r--r--drivers/phy/sophgo/Makefile2
-rw-r--r--drivers/phy/sophgo/phy-cv1800-usb2.c169
-rw-r--r--drivers/phy/st/Kconfig11
-rw-r--r--drivers/phy/st/Makefile1
-rw-r--r--drivers/phy/st/phy-miphy28lp.c5
-rw-r--r--drivers/phy/st/phy-stih407-usb.c26
-rw-r--r--drivers/phy/st/phy-stm32-combophy.c605
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c6
-rw-r--r--drivers/phy/starfive/Kconfig10
-rw-r--r--drivers/phy/starfive/Makefile1
-rw-r--r--drivers/phy/starfive/phy-jh7110-dphy-rx.c5
-rw-r--r--drivers/phy/starfive/phy-jh7110-dphy-tx.c461
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c23
-rw-r--r--drivers/phy/tegra/Kconfig5
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c132
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c6
-rw-r--r--drivers/phy/tegra/xusb.c14
-rw-r--r--drivers/phy/tegra/xusb.h1
-rw-r--r--drivers/phy/ti/Kconfig2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c54
-rw-r--r--drivers/phy/ti/phy-da8xx-usb.c53
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c83
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c156
-rw-r--r--drivers/phy/ti/phy-omap-control.c1
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c16
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c16
-rw-r--r--drivers/phy/ti/phy-tusb1210.c11
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c3
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c330
-rw-r--r--drivers/pinctrl/Kconfig153
-rw-r--r--drivers/pinctrl/Makefile16
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c6
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c28
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/Kconfig12
-rw-r--r--drivers/pinctrl/bcm/Kconfig.stb10
-rw-r--r--drivers/pinctrl/bcm/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c849
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c33
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm4908.c3
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c747
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.c442
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.h93
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c8
-rw-r--r--drivers/pinctrl/berlin/berlin.c31
-rw-r--r--drivers/pinctrl/berlin/berlin.h6
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c44
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c26
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c27
-rw-r--r--drivers/pinctrl/cix/Kconfig15
-rw-r--r--drivers/pinctrl/cix/Makefile4
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1-base.c587
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1.c559
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1.h48
-rw-r--r--drivers/pinctrl/core.c143
-rw-r--r--drivers/pinctrl/core.h5
-rw-r--r--drivers/pinctrl/devicetree.c10
-rw-r--r--drivers/pinctrl/freescale/Kconfig120
-rw-r--r--drivers/pinctrl/freescale/Makefile2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c361
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c83
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c228
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c350
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx91.c271
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c18
-rw-r--r--drivers/pinctrl/intel/Kconfig3
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c70
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c46
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c70
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c39
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c99
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c23
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c81
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c35
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c62
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel-platform.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c418
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h14
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c36
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c28
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c41
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c57
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorpoint.c48
-rw-r--r--drivers/pinctrl/intel/pinctrl-moorefield.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c28
-rw-r--r--drivers/pinctrl/intel/pinctrl-tangier.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c72
-rw-r--r--drivers/pinctrl/mediatek/Kconfig68
-rw-r--r--drivers/pinctrl/mediatek/Makefile6
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c325
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h29
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c3030
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c29
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h3
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6878.c1478
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6893.c879
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c34
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c44
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7629.c22
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt76x8.c88
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c36
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c26
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c1546
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c1698
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c1858
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c146
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c31
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6878.h2248
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h2283
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h2452
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h3085
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c42
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.h7
-rw-r--r--drivers/pinctrl/meson/Kconfig33
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c1109
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-c3.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-t7.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c13
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c13
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c25
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c55
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c17
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c27
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-s4.c13
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c38
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c16
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c49
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c42
-rw-r--r--drivers/pinctrl/nomadik/Kconfig6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c30
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c51
-rw-r--r--drivers/pinctrl/nuvoton/Kconfig19
-rw-r--r--drivers/pinctrl/nuvoton/Makefile2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c1184
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.h52
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35d1.c1798
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c233
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c264
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c66
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c89
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32g2.c52
-rw-r--r--drivers/pinctrl/pinconf-generic.c214
-rw-r--r--drivers/pinctrl/pinconf.h21
-rw-r--r--drivers/pinctrl/pinctrl-amd.c146
-rw-r--r--drivers/pinctrl/pinctrl-amd.h11
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.c233
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.h95
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c38
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c4
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c21
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c29
-rw-r--r--drivers/pinctrl/pinctrl-at91.c36
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c87
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c33
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c2
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c574
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c14
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c6
-rw-r--r--drivers/pinctrl/pinctrl-eic7700.c704
-rw-r--r--drivers/pinctrl/pinctrl-ep93xx.c1434
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c82
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h14
-rw-r--r--drivers/pinctrl/pinctrl-eyeq5.c575
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c2
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c11
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c1011
-rw-r--r--drivers/pinctrl/pinctrl-k210.c59
-rw-r--r--drivers/pinctrl/pinctrl-k230.c648
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c65
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-max7360.c215
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c9
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c80
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c1
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c1
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c12
-rw-r--r--drivers/pinctrl/pinctrl-mlxbf3.c14
-rw-r--r--drivers/pinctrl/pinctrl-mpfs-iomux0.c278
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c248
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c10
-rw-r--r--drivers/pinctrl/pinctrl-pic64gx-gpio2.c356
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c20
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c97
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c1113
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h10
-rw-r--r--drivers/pinctrl/pinctrl-rp1.c1913
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c14
-rw-r--r--drivers/pinctrl/pinctrl-single.c37
-rw-r--r--drivers/pinctrl/pinctrl-st.c46
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c26
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c35
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c7
-rw-r--r--drivers/pinctrl/pinctrl-th1520.c918
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c313
-rw-r--r--drivers/pinctrl/pinctrl-upboard.c1070
-rw-r--r--drivers/pinctrl/pinctrl-utils.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c18
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c3
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c178
-rw-r--r--drivers/pinctrl/pinmux.c280
-rw-r--r--drivers/pinctrl/pinmux.h22
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c8
-rw-r--r--drivers/pinctrl/qcom/Kconfig20
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm81
-rw-r--r--drivers/pinctrl/qcom/Makefile12
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-glymur.c1777
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c809
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-kaanapali.c1803
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c32
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-milos.c1339
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c128
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1625
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c82
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c1106
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c1245
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c61
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c1504
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c160
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c22
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c236
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c84
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c1730
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c36
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c42
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c709
-rw-r--r--drivers/pinctrl/realtek/Kconfig1
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd.c2
-rw-r--r--drivers/pinctrl/renesas/Kconfig265
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/gpio.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-emev2.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c932
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c89
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7723.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7724.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7734.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c37
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c15
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c1501
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c32
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzt2h.c813
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c30
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c13
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c1150
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c419
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h89
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c161
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h49
-rw-r--r--drivers/pinctrl/sophgo/Kconfig88
-rw-r--r--drivers/pinctrl/sophgo/Makefile13
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1800b.c471
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1812h.c780
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.c451
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.h139
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2000.c780
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2002.c551
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042-ops.c296
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042.c655
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042.h49
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2044.c718
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sophgo-common.c451
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sophgo.h136
-rw-r--r--drivers/pinctrl/spacemit/Kconfig18
-rw-r--r--drivers/pinctrl/spacemit/Makefile3
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c1065
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.h40
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c9
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c13
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c2
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c23
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c35
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c29
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h1
-rw-r--r--drivers/pinctrl/stm32/Kconfig20
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c726
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c625
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h23
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c17
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c8
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c12
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun55i-a523-r.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun55i-a523.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c373
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c92
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h47
-rw-r--r--drivers/pinctrl/tegra/Kconfig4
-rw-r--r--drivers/pinctrl/tegra/Makefile1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c77
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h34
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra186.c1979
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c11
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c100
-rw-r--r--drivers/pinctrl/uniphier/Kconfig2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/platform/Kconfig6
-rw-r--r--drivers/platform/Makefile5
-rw-r--r--drivers/platform/arm64/Kconfig60
-rw-r--r--drivers/platform/arm64/Makefile3
-rw-r--r--drivers/platform/arm64/acer-aspire1-ec.c12
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c827
-rw-r--r--drivers/platform/arm64/lenovo-thinkpad-t14s.c662
-rw-r--r--drivers/platform/arm64/lenovo-yoga-c630.c257
-rw-r--r--drivers/platform/chrome/Kconfig25
-rw-r--r--drivers/platform/chrome/Makefile8
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c16
-rw-r--r--drivers/platform/chrome/chromeos_of_hw_prober.c187
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c7
-rw-r--r--drivers/platform/chrome/cros_ec.c100
-rw-r--r--drivers/platform/chrome/cros_ec.h3
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c74
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c64
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c10
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c7
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c18
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c409
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.c91
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.h18
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c205
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c11
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test_util.h5
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c23
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c11
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c9
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c73
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c10
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c180
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h2
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c6
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c10
-rw-r--r--drivers/platform/chrome/cros_hps_i2c.c2
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c101
-rw-r--r--drivers/platform/chrome/cros_typec_altmode.c373
-rw-r--r--drivers/platform/chrome/cros_typec_altmode.h51
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c2
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c7
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c21
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c3
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c22
-rw-r--r--drivers/platform/chrome/wilco_ec/properties.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c5
-rw-r--r--drivers/platform/cznic/Kconfig98
-rw-r--r--drivers/platform/cznic/Makefile11
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c419
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c1123
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-keyctl.c162
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c260
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c94
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-watchdog.c130
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h163
-rw-r--r--drivers/platform/cznic/turris-signing-key.c193
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c6
-rw-r--r--drivers/platform/loongarch/Kconfig2
-rw-r--r--drivers/platform/loongarch/loongson-laptop.c87
-rw-r--r--drivers/platform/mellanox/Kconfig26
-rw-r--r--drivers/platform/mellanox/Makefile2
-rw-r--r--drivers/platform/mellanox/mlx-platform.c (renamed from drivers/platform/x86/mlx-platform.c)1567
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c40
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h5
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c301
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c19
-rw-r--r--drivers/platform/mellanox/mlxreg-dpu.c613
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c12
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c4
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c14
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c121
-rw-r--r--drivers/platform/mips/cpu_hwmon.c3
-rw-r--r--drivers/platform/olpc/olpc-ec.c3
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c4
-rw-r--r--drivers/platform/raspberrypi/Kconfig52
-rw-r--r--drivers/platform/raspberrypi/Makefile15
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/TESTING125
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/TODO4
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c1477
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c)17
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c)1090
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c)17
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c)102
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h)5
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/Kconfig (renamed from drivers/staging/vc04_services/vchiq-mmal/Kconfig)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/Makefile (renamed from drivers/staging/vc04_services/vchiq-mmal/Makefile)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-common.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h)2
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c)32
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h)34
-rw-r--r--drivers/platform/surface/Kconfig2
-rw-r--r--drivers/platform/surface/aggregator/bus.c6
-rw-r--r--drivers/platform/surface/aggregator/controller.c70
-rw-r--r--drivers/platform/surface/aggregator/core.c82
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c4
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c4
-rw-r--r--drivers/platform/surface/aggregator/trace.h2
-rw-r--r--drivers/platform/surface/surface3-wmi.c2
-rw-r--r--drivers/platform/surface/surface3_power.c2
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c6
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c3
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c114
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c2
-rw-r--r--drivers/platform/surface/surface_dtx.c3
-rw-r--r--drivers/platform/surface/surface_gpe.c2
-rw-r--r--drivers/platform/surface/surface_hotplug.c2
-rw-r--r--drivers/platform/surface/surface_platform_profile.c46
-rw-r--r--drivers/platform/wmi/Kconfig34
-rw-r--r--drivers/platform/wmi/Makefile8
-rw-r--r--drivers/platform/wmi/core.c1429
-rw-r--r--drivers/platform/x86/Kconfig375
-rw-r--r--drivers/platform/x86/Makefile42
-rw-r--r--drivers/platform/x86/acer-wmi.c823
-rw-r--r--drivers/platform/x86/acerhdf.c37
-rw-r--r--drivers/platform/x86/adv_swbutton.c2
-rw-r--r--drivers/platform/x86/amd/Kconfig30
-rw-r--r--drivers/platform/x86/amd/Makefile7
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c419
-rw-r--r--drivers/platform/x86/amd/hfi/Kconfig18
-rw-r--r--drivers/platform/x86/amd/hfi/Makefile7
-rw-r--r--drivers/platform/x86/amd/hfi/hfi.c546
-rw-r--r--drivers/platform/x86/amd/hsmp.c952
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig49
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile13
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c647
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c466
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h74
-rw-r--r--drivers/platform/x86/amd/hsmp/hwmon.c121
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c350
-rw-r--r--drivers/platform/x86/amd/pmc/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile6
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c332
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c134
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c536
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h108
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig3
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile8
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c178
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c18
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c18
-rw-r--r--drivers/platform/x86/amd/pmf/core.c91
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c51
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h196
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c221
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c112
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c258
-rw-r--r--drivers/platform/x86/amd/x3d_vcache.c176
-rw-r--r--drivers/platform/x86/amilo-rfkill.c7
-rw-r--r--drivers/platform/x86/asus-armoury.c1161
-rw-r--r--drivers/platform/x86/asus-armoury.h1541
-rw-r--r--drivers/platform/x86/asus-laptop.c16
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c55
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c14
-rw-r--r--drivers/platform/x86/asus-wmi.c749
-rw-r--r--drivers/platform/x86/asus-wmi.h5
-rw-r--r--drivers/platform/x86/ayaneo-ec.c593
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c114
-rw-r--r--drivers/platform/x86/classmate-laptop.c7
-rw-r--r--drivers/platform/x86/compal-laptop.c62
-rw-r--r--drivers/platform/x86/dasharo-acpi.c360
-rw-r--r--drivers/platform/x86/dell/Kconfig51
-rw-r--r--drivers/platform/x86/dell/Makefile43
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-base.c491
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-legacy.c95
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c1652
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c842
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.h117
-rw-r--r--drivers/platform/x86/dell/dcdbas.c21
-rw-r--r--drivers/platform/x86/dell/dcdbas.h8
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c441
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c259
-rw-r--r--drivers/platform/x86/dell/dell-pc.c302
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c163
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c3
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c4
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h16
-rw-r--r--drivers/platform/x86/dell/dell-smo8800-ids.h27
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c18
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c21
-rw-r--r--drivers/platform/x86/dell/dell-wmi-aio.c13
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c27
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c316
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c7
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c26
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c42
-rw-r--r--drivers/platform/x86/eeepc-laptop.c16
-rw-r--r--drivers/platform/x86/eeepc-wmi.c4
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c43
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h5
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c58
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c4
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c4
-rw-r--r--drivers/platform/x86/hp/Kconfig2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/Makefile2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c29
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c18
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/int-attributes.c7
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c18
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c30
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c3
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/string-attributes.c12
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c675
-rw-r--r--drivers/platform/x86/hp/hp_accel.c6
-rw-r--r--drivers/platform/x86/hp/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/huawei-wmi.c20
-rw-r--r--drivers/platform/x86/ibm_rtl.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2096
-rw-r--r--drivers/platform/x86/ideapad-laptop.h152
-rw-r--r--drivers/platform/x86/inspur_platform_profile.c43
-rw-r--r--drivers/platform/x86/intel/Kconfig27
-rw-r--r--drivers/platform/x86/intel/Makefile65
-rw-r--r--drivers/platform/x86/intel/bxtwc_tmu.c24
-rw-r--r--drivers/platform/x86/intel/bytcrc_pwrsrc.c81
-rw-r--r--drivers/platform/x86/intel/chtdc_ti_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c37
-rw-r--r--drivers/platform/x86/intel/ehl_pse_io.c86
-rw-r--r--drivers/platform/x86/intel/hid.c59
-rw-r--r--drivers/platform/x86/intel/ifs/Makefile2
-rw-r--r--drivers/platform/x86/intel/ifs/core.c54
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h97
-rw-r--r--drivers/platform/x86/intel/ifs/load.c41
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c248
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c25
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c2
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile10
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c170
-rw-r--r--drivers/platform/x86/intel/int3472/common.c12
-rw-r--r--drivers/platform/x86/intel/int3472/common.h131
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c219
-rw-r--r--drivers/platform/x86/intel/int3472/discrete_quirks.c21
-rw-r--r--drivers/platform/x86/intel/int3472/led.c5
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c6
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c128
-rw-r--r--drivers/platform/x86/intel/mrfld_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel/oaktrail.c3
-rw-r--r--drivers/platform/x86/intel/plr_tpmi.c355
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile8
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c56
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c160
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c83
-rw-r--r--drivers/platform/x86/intel/pmc/core.c855
-rw-r--r--drivers/platform/x86/intel/pmc/core.h271
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c326
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c24
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c88
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c131
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c17
-rw-r--r--drivers/platform/x86/intel/pmc/ptl.c580
-rw-r--r--drivers/platform/x86/intel/pmc/spt.c45
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c208
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.h24
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c63
-rw-r--r--drivers/platform/x86/intel/pmc/wcl.c504
-rw-r--r--drivers/platform/x86/intel/pmt/Kconfig28
-rw-r--r--drivers/platform/x86/intel/pmt/Makefile4
-rw-r--r--drivers/platform/x86/intel/pmt/class.c90
-rw-r--r--drivers/platform/x86/intel/pmt/class.h22
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c463
-rw-r--r--drivers/platform/x86/intel/pmt/discovery-kunit.c116
-rw-r--r--drivers/platform/x86/intel/pmt/discovery.c635
-rw-r--r--drivers/platform/x86/intel/pmt/features.c205
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c122
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c35
-rw-r--r--drivers/platform/x86/intel/rst.c1
-rw-r--r--drivers/platform/x86/intel/sdsi.c37
-rw-r--r--drivers/platform/x86/intel/smartconnect.c1
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c157
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.h3
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c19
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c4
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c136
-rw-r--r--drivers/platform/x86/intel/telemetry/core.c177
-rw-r--r--drivers/platform/x86/intel/telemetry/debugfs.c4
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c237
-rw-r--r--drivers/platform/x86/intel/tpmi.c847
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c265
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.h19
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c157
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h51
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c410
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c138
-rw-r--r--drivers/platform/x86/intel/vbtn.c12
-rw-r--r--drivers/platform/x86/intel/vsec.c408
-rw-r--r--drivers/platform/x86/intel/vsec.h108
-rw-r--r--drivers/platform/x86/intel/vsec_tpmi.c861
-rw-r--r--drivers/platform/x86/intel_ips.c47
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c142
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c2
-rw-r--r--drivers/platform/x86/intel_scu_pltdrv.c2
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c5
-rw-r--r--drivers/platform/x86/lenovo-wmi-camera.c127
-rw-r--r--drivers/platform/x86/lenovo-ymc.c219
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c338
-rw-r--r--drivers/platform/x86/lenovo-yogabook.c573
-rw-r--r--drivers/platform/x86/lenovo/Kconfig276
-rw-r--r--drivers/platform/x86/lenovo/Makefile28
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.c2580
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.h22
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.c (renamed from drivers/platform/x86/think-lmi.c)419
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.h (renamed from drivers/platform/x86/think-lmi.h)23
-rw-r--r--drivers/platform/x86/lenovo/thinkpad_acpi.c (renamed from drivers/platform/x86/thinkpad_acpi.c)536
-rw-r--r--drivers/platform/x86/lenovo/wmi-camera.c146
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.c302
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.h25
-rw-r--r--drivers/platform/x86/lenovo/wmi-events.c196
-rw-r--r--drivers/platform/x86/lenovo/wmi-events.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.c414
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-helpers.c74
-rw-r--r--drivers/platform/x86/lenovo/wmi-helpers.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-hotkey-utilities.c224
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.c665
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.h16
-rw-r--r--drivers/platform/x86/lenovo/ymc.c165
-rw-r--r--drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c333
-rw-r--r--drivers/platform/x86/lenovo/yogabook.c573
-rw-r--r--drivers/platform/x86/lg-laptop.c277
-rw-r--r--drivers/platform/x86/meraki-mx100.c404
-rw-r--r--drivers/platform/x86/msi-laptop.c6
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c144
-rw-r--r--drivers/platform/x86/msi-wmi.c20
-rw-r--r--drivers/platform/x86/oxpec.c973
-rw-r--r--drivers/platform/x86/p2sb.c80
-rw-r--r--drivers/platform/x86/panasonic-laptop.c92
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c192
-rw-r--r--drivers/platform/x86/portwell-ec.c460
-rw-r--r--drivers/platform/x86/quickstart.c13
-rw-r--r--drivers/platform/x86/redmi-wmi.c130
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1426
-rw-r--r--drivers/platform/x86/samsung-laptop.c117
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sel3350-platform.c2
-rw-r--r--drivers/platform/x86/serdev_helpers.h60
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c55
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c3
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c3
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c3
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c3
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc.c1
-rw-r--r--drivers/platform/x86/silicom-platform.c11
-rw-r--r--drivers/platform/x86/sony-laptop.c178
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba-wmi.c15
-rw-r--r--drivers/platform/x86/toshiba_acpi.c45
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c222
-rw-r--r--drivers/platform/x86/tuxedo/Kconfig8
-rw-r--r--drivers/platform/x86/tuxedo/Makefile8
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Kconfig17
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Makefile10
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_ab.c923
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.c91
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.h109
-rw-r--r--drivers/platform/x86/uniwill/Kconfig38
-rw-r--r--drivers/platform/x86/uniwill/Makefile8
-rw-r--r--drivers/platform/x86/uniwill/uniwill-acpi.c1912
-rw-r--r--drivers/platform/x86/uniwill/uniwill-wmi.c92
-rw-r--r--drivers/platform/x86/uniwill/uniwill-wmi.h129
-rw-r--r--drivers/platform/x86/uv_sysfs.c1
-rw-r--r--drivers/platform/x86/wireless-hotkey.c3
-rw-r--r--drivers/platform/x86/wmi-bmof.c75
-rw-r--r--drivers/platform/x86/wmi.c1484
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig9
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile4
-rw-r--r--drivers/platform/x86/x86-android-tablets/acer.c247
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c139
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c226
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c51
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c319
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c541
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.c110
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.h9
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c261
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h44
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c10
-rw-r--r--drivers/platform/x86/xo1-rfkill.c3
-rw-r--r--drivers/platform/x86/xo15-ebook.c10
-rw-r--r--drivers/pmdomain/Kconfig2
-rw-r--r--drivers/pmdomain/Makefile2
-rw-r--r--drivers/pmdomain/amlogic/Kconfig11
-rw-r--r--drivers/pmdomain/amlogic/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c79
-rw-r--r--drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c379
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c170
-rw-r--r--drivers/pmdomain/apple/Kconfig1
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c3
-rw-r--r--drivers/pmdomain/arm/Kconfig6
-rw-r--r--drivers/pmdomain/arm/scmi_perf_domain.c3
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c43
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c34
-rw-r--r--drivers/pmdomain/bcm/raspberrypi-power.c43
-rw-r--r--drivers/pmdomain/core.c753
-rw-r--r--drivers/pmdomain/governor.c61
-rw-r--r--drivers/pmdomain/imx/gpc.c25
-rw-r--r--drivers/pmdomain/imx/gpcv2.c31
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c13
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c5
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c29
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c29
-rw-r--r--drivers/pmdomain/imx/scu-pd.c5
-rw-r--r--drivers/pmdomain/marvell/Kconfig18
-rw-r--r--drivers/pmdomain/marvell/Makefile3
-rw-r--r--drivers/pmdomain/marvell/pxa1908-power-controller.c274
-rw-r--r--drivers/pmdomain/mediatek/Kconfig29
-rw-r--r--drivers/pmdomain/mediatek/Makefile9
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c144
-rw-r--r--drivers/pmdomain/mediatek/mt6735-pm-domains.h96
-rw-r--r--drivers/pmdomain/mediatek/mt6795-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt6893-pm-domains.h585
-rw-r--r--drivers/pmdomain/mediatek/mt8167-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8173-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8183-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8186-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8192-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8195-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8196-pm-domains.h625
-rw-r--r--drivers/pmdomain/mediatek/mt8365-pm-domains.h14
-rw-r--r--drivers/pmdomain/mediatek/mtk-mfg-pmdomain.c1044
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c729
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h127
-rw-r--r--drivers/pmdomain/qcom/cpr.c94
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c196
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c134
-rw-r--r--drivers/pmdomain/renesas/Kconfig124
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c8
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c27
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c14
-rw-r--r--drivers/pmdomain/rockchip/Kconfig3
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c437
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c20
-rw-r--r--drivers/pmdomain/sunxi/Kconfig29
-rw-r--r--drivers/pmdomain/sunxi/Makefile2
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c32
-rw-r--r--drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c208
-rw-r--r--drivers/pmdomain/sunxi/sun55i-pck600.c234
-rw-r--r--drivers/pmdomain/tegra/powergate-bpmp.c1
-rw-r--r--drivers/pmdomain/thead/Kconfig13
-rw-r--r--drivers/pmdomain/thead/Makefile2
-rw-r--r--drivers/pmdomain/thead/th1520-pm-domains.c285
-rw-r--r--drivers/pmdomain/ti/Kconfig2
-rw-r--r--drivers/pmdomain/ti/omap_prm.c10
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c130
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c18
-rw-r--r--drivers/pnp/base.h5
-rw-r--r--drivers/pnp/card.c32
-rw-r--r--drivers/pnp/core.c16
-rw-r--r--drivers/pnp/driver.c31
-rw-r--r--drivers/pnp/isapnp/core.c4
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/reset/Kconfig54
-rw-r--r--drivers/power/reset/Makefile5
-rw-r--r--drivers/power/reset/as3722-poweroff.c2
-rw-r--r--drivers/power/reset/at91-poweroff.c2
-rw-r--r--drivers/power/reset/at91-reset.c7
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c6
-rw-r--r--drivers/power/reset/brcmstb-reboot.c59
-rw-r--r--drivers/power/reset/ep93xx-restart.c84
-rw-r--r--drivers/power/reset/gpio-poweroff.c8
-rw-r--r--drivers/power/reset/keystone-reset.c20
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c10
-rw-r--r--drivers/power/reset/macsmc-reboot.c290
-rw-r--r--drivers/power/reset/piix4-poweroff.c1
-rw-r--r--drivers/power/reset/pwr-mlxbf.c16
-rw-r--r--drivers/power/reset/qcom-pon.c30
-rw-r--r--drivers/power/reset/qnap-poweroff.c2
-rw-r--r--drivers/power/reset/reboot-mode.c25
-rw-r--r--drivers/power/reset/sc27xx-poweroff.c10
-rw-r--r--drivers/power/reset/spacemit-p1-reboot.c88
-rw-r--r--drivers/power/reset/syscon-reboot.c101
-rw-r--r--drivers/power/reset/tdx-ec-poweroff.c150
-rw-r--r--drivers/power/reset/th1520-aon-reboot.c98
-rw-r--r--drivers/power/sequencing/Kconfig38
-rw-r--r--drivers/power/sequencing/Makefile7
-rw-r--r--drivers/power/sequencing/core.c1106
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c478
-rw-r--r--drivers/power/sequencing/pwrseq-thead-gpu.c249
-rw-r--r--drivers/power/supply/88pm860x_battery.c8
-rw-r--r--drivers/power/supply/88pm860x_charger.c8
-rw-r--r--drivers/power/supply/Kconfig155
-rw-r--r--drivers/power/supply/Makefile18
-rw-r--r--drivers/power/supply/ab8500_bmdata.c4
-rw-r--r--drivers/power/supply/ab8500_btemp.c12
-rw-r--r--drivers/power/supply/ab8500_chargalg.c18
-rw-r--r--drivers/power/supply/ab8500_charger.c63
-rw-r--r--drivers/power/supply/ab8500_fg.c42
-rw-r--r--drivers/power/supply/acer_a500_battery.c12
-rw-r--r--drivers/power/supply/act8945a_charger.c4
-rw-r--r--drivers/power/supply/adc-battery-helper.c327
-rw-r--r--drivers/power/supply/adc-battery-helper.h62
-rw-r--r--drivers/power/supply/adp5061.c4
-rw-r--r--drivers/power/supply/apm_power.c9
-rw-r--r--drivers/power/supply/axp20x_ac_power.c2
-rw-r--r--drivers/power/supply/axp20x_battery.c630
-rw-r--r--drivers/power/supply/axp20x_usb_power.c391
-rw-r--r--drivers/power/supply/axp288_charger.c22
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c2
-rw-r--r--drivers/power/supply/bd71828-power.c1049
-rw-r--r--drivers/power/supply/bd99954-charger.c11
-rw-r--r--drivers/power/supply/bq2415x_charger.c42
-rw-r--r--drivers/power/supply/bq24190_charger.c75
-rw-r--r--drivers/power/supply/bq24257_charger.c10
-rw-r--r--drivers/power/supply/bq24735-charger.c4
-rw-r--r--drivers/power/supply/bq2515x_charger.c6
-rw-r--r--drivers/power/supply/bq256xx_charger.c23
-rw-r--r--drivers/power/supply/bq257xx_charger.c755
-rw-r--r--drivers/power/supply/bq25890_charger.c12
-rw-r--r--drivers/power/supply/bq25980_charger.c14
-rw-r--r--drivers/power/supply/bq27xxx_battery.c142
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c15
-rw-r--r--drivers/power/supply/chagall-battery.c291
-rw-r--r--drivers/power/supply/charger-manager.c10
-rw-r--r--drivers/power/supply/collie_battery.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c4
-rw-r--r--drivers/power/supply/cpcap-charger.c14
-rw-r--r--drivers/power/supply/cros_charge-control.c337
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c2
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c26
-rw-r--r--drivers/power/supply/cw2015_battery.c16
-rw-r--r--drivers/power/supply/da9030_battery.c12
-rw-r--r--drivers/power/supply/da9052-battery.c2
-rw-r--r--drivers/power/supply/da9150-charger.c2
-rw-r--r--drivers/power/supply/da9150-fg.c4
-rw-r--r--drivers/power/supply/ds2760_battery.c58
-rw-r--r--drivers/power/supply/ds2780_battery.c14
-rw-r--r--drivers/power/supply/ds2781_battery.c14
-rw-r--r--drivers/power/supply/ds2782_battery.c89
-rw-r--r--drivers/power/supply/generic-adc-battery.c6
-rw-r--r--drivers/power/supply/gpio-charger.c34
-rw-r--r--drivers/power/supply/huawei-gaokun-battery.c645
-rw-r--r--drivers/power/supply/ingenic-battery.c12
-rw-r--r--drivers/power/supply/intel_dc_ti_battery.c391
-rw-r--r--drivers/power/supply/ip5xxx_power.c571
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c5
-rw-r--r--drivers/power/supply/isp1704_charger.c2
-rw-r--r--drivers/power/supply/lego_ev3_battery.c3
-rw-r--r--drivers/power/supply/lenovo_yoga_c630_battery.c498
-rw-r--r--drivers/power/supply/lp8727_charger.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c2
-rw-r--r--drivers/power/supply/lt3651-charger.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c446
-rw-r--r--drivers/power/supply/max14577_charger.c6
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max17040_battery.c9
-rw-r--r--drivers/power/supply/max17042_battery.c210
-rw-r--r--drivers/power/supply/max1720x_battery.c634
-rw-r--r--drivers/power/supply/max77650-charger.c4
-rw-r--r--drivers/power/supply/max77693_charger.c56
-rw-r--r--drivers/power/supply/max77705_charger.c713
-rw-r--r--drivers/power/supply/max77976_charger.c19
-rw-r--r--drivers/power/supply/max8903_charger.c2
-rw-r--r--drivers/power/supply/max8925_power.c4
-rw-r--r--drivers/power/supply/max8971_charger.c752
-rw-r--r--drivers/power/supply/max8998_charger.c1
-rw-r--r--drivers/power/supply/mm8013.c6
-rw-r--r--drivers/power/supply/mp2629_charger.c15
-rw-r--r--drivers/power/supply/mt6360_charger.c15
-rw-r--r--drivers/power/supply/mt6370-charger.c34
-rw-r--r--drivers/power/supply/olpc_battery.c11
-rw-r--r--drivers/power/supply/pcf50633-charger.c466
-rw-r--r--drivers/power/supply/pf1550-charger.c641
-rw-r--r--drivers/power/supply/pm8916_bms_vm.c2
-rw-r--r--drivers/power/supply/pm8916_lbc.c2
-rw-r--r--drivers/power/supply/pmu_battery.c1
-rw-r--r--drivers/power/supply/power_supply.h53
-rw-r--r--drivers/power/supply/power_supply_core.c633
-rw-r--r--drivers/power/supply/power_supply_hwmon.c75
-rw-r--r--drivers/power/supply/power_supply_leds.c175
-rw-r--r--drivers/power/supply/power_supply_sysfs.c301
-rw-r--r--drivers/power/supply/qcom_battmgr.c410
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c1055
-rw-r--r--drivers/power/supply/qcom_smbb.c10
-rw-r--r--drivers/power/supply/qcom_smbx.c1052
-rw-r--r--drivers/power/supply/rk817_charger.c129
-rw-r--r--drivers/power/supply/rn5t618_power.c13
-rw-r--r--drivers/power/supply/rt5033_battery.c3
-rw-r--r--drivers/power/supply/rt5033_charger.c5
-rw-r--r--drivers/power/supply/rt9455_charger.c8
-rw-r--r--drivers/power/supply/rt9467-charger.c71
-rw-r--r--drivers/power/supply/rt9471.c81
-rw-r--r--drivers/power/supply/rt9756.c955
-rw-r--r--drivers/power/supply/rx51_battery.c2
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c36
-rw-r--r--drivers/power/supply/sbs-battery.c7
-rw-r--r--drivers/power/supply/sbs-charger.c20
-rw-r--r--drivers/power/supply/sbs-manager.c8
-rw-r--r--drivers/power/supply/sc2731_charger.c4
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c20
-rw-r--r--drivers/power/supply/smb347-charger.c4
-rw-r--r--drivers/power/supply/stc3117_fuel_gauge.c612
-rw-r--r--drivers/power/supply/surface_battery.c6
-rw-r--r--drivers/power/supply/surface_charger.c2
-rw-r--r--drivers/power/supply/test_power.c138
-rw-r--r--drivers/power/supply/tps65090-charger.c4
-rw-r--r--drivers/power/supply/tps65217_charger.c4
-rw-r--r--drivers/power/supply/twl4030_charger.c5
-rw-r--r--drivers/power/supply/twl6030_charger.c581
-rw-r--r--drivers/power/supply/ucs1002_power.c30
-rw-r--r--drivers/power/supply/ug3105_battery.c401
-rw-r--r--drivers/power/supply/wm831x_power.c32
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c2
-rw-r--r--drivers/powercap/Kconfig2
-rw-r--r--drivers/powercap/dtpm.c16
-rw-r--r--drivers/powercap/dtpm_cpu.c2
-rw-r--r--drivers/powercap/dtpm_devfreq.c2
-rw-r--r--drivers/powercap/idle_inject.c26
-rw-r--r--drivers/powercap/intel_rapl_common.c221
-rw-r--r--drivers/powercap/intel_rapl_msr.c71
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c32
-rw-r--r--drivers/powercap/powercap_sys.c3
-rw-r--r--drivers/pps/Makefile3
-rw-r--r--drivers/pps/clients/pps-gpio.c22
-rw-r--r--drivers/pps/clients/pps-ktimer.c6
-rw-r--r--drivers/pps/clients/pps-ldisc.c6
-rw-r--r--drivers/pps/clients/pps_parport.c13
-rw-r--r--drivers/pps/generators/Kconfig38
-rw-r--r--drivers/pps/generators/Makefile5
-rw-r--r--drivers/pps/generators/pps_gen-dummy.c96
-rw-r--r--drivers/pps/generators/pps_gen.c344
-rw-r--r--drivers/pps/generators/pps_gen_parport.c7
-rw-r--r--drivers/pps/generators/pps_gen_tio.c272
-rw-r--r--drivers/pps/generators/sysfs.c75
-rw-r--r--drivers/pps/kapi.c16
-rw-r--r--drivers/pps/kc.c10
-rw-r--r--drivers/pps/pps.c142
-rw-r--r--drivers/ps3/ps3-lpm.c2
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ps3/ps3stor_lib.c3
-rw-r--r--drivers/ps3/sys-manager-core.c2
-rw-r--r--drivers/ptp/Kconfig45
-rw-r--r--drivers/ptp/Makefile7
-rw-r--r--drivers/ptp/ptp_chardev.c771
-rw-r--r--drivers/ptp/ptp_clock.c172
-rw-r--r--drivers/ptp/ptp_clockmatrix.c20
-rw-r--r--drivers/ptp/ptp_dte.c2
-rw-r--r--drivers/ptp/ptp_fc3.c10
-rw-r--r--drivers/ptp/ptp_idt82p33.c25
-rw-r--r--drivers/ptp/ptp_ines.c39
-rw-r--r--drivers/ptp/ptp_kvm_x86.c6
-rw-r--r--drivers/ptp/ptp_mock.c2
-rw-r--r--drivers/ptp/ptp_netc.c1043
-rw-r--r--drivers/ptp/ptp_ocp.c360
-rw-r--r--drivers/ptp/ptp_pch.c6
-rw-r--r--drivers/ptp/ptp_private.h20
-rw-r--r--drivers/ptp/ptp_qoriq.c26
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/ptp/ptp_s390.c129
-rw-r--r--drivers/ptp/ptp_sysfs.c5
-rw-r--r--drivers/ptp/ptp_vclock.c9
-rw-r--r--drivers/ptp/ptp_vmclock.c610
-rw-r--r--drivers/ptp/ptp_vmw.c12
-rw-r--r--drivers/pwm/Kconfig216
-rw-r--r--drivers/pwm/Makefile19
-rw-r--r--drivers/pwm/core.c1596
-rw-r--r--drivers/pwm/pwm-adp5585.c223
-rw-r--r--drivers/pwm/pwm-airoha.c622
-rw-r--r--drivers/pwm/pwm-argon-fan-hat.c109
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c7
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c119
-rw-r--r--drivers/pwm/pwm-atmel.c12
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c341
-rw-r--r--drivers/pwm/pwm-bcm2835.c28
-rw-r--r--drivers/pwm/pwm-berlin.c4
-rw-r--r--drivers/pwm/pwm-clk.c2
-rw-r--r--drivers/pwm/pwm-clps711x.c12
-rw-r--r--drivers/pwm/pwm-cros-ec.c74
-rw-r--r--drivers/pwm/pwm-dwc-core.c2
-rw-r--r--drivers/pwm/pwm-dwc.c14
-rw-r--r--drivers/pwm/pwm-dwc.h2
-rw-r--r--drivers/pwm/pwm-ep93xx.c26
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c69
-rw-r--r--drivers/pwm/pwm-gpio.c240
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/pwm/pwm-imx-tpm.c29
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-imx27.c162
-rw-r--r--drivers/pwm/pwm-intel-lgm.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c9
-rw-r--r--drivers/pwm/pwm-loongson.c290
-rw-r--r--drivers/pwm/pwm-lp3943.c10
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c16
-rw-r--r--drivers/pwm/pwm-lpss-pci.c33
-rw-r--r--drivers/pwm/pwm-lpss-platform.c12
-rw-r--r--drivers/pwm/pwm-lpss.c5
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-max7360.c209
-rw-r--r--drivers/pwm/pwm-mc33xs2410.c407
-rw-r--r--drivers/pwm/pwm-mediatek.c474
-rw-r--r--drivers/pwm/pwm-meson.c162
-rw-r--r--drivers/pwm/pwm-microchip-core.c19
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c4
-rw-r--r--drivers/pwm/pwm-pca9685.c522
-rw-r--r--drivers/pwm/pwm-pxa.c25
-rw-r--r--drivers/pwm/pwm-rcar.c26
-rw-r--r--drivers/pwm/pwm-rockchip.c35
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c456
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/pwm/pwm-sifive.c54
-rw-r--r--drivers/pwm/pwm-sophgo-sg2042.c301
-rw-r--r--drivers/pwm/pwm-spear.c1
-rw-r--r--drivers/pwm/pwm-sti.c23
-rw-r--r--drivers/pwm/pwm-stm32-lp.c227
-rw-r--r--drivers/pwm/pwm-stm32.c652
-rw-r--r--drivers/pwm/pwm-stmpe.c25
-rw-r--r--drivers/pwm/pwm-sun4i.c12
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c156
-rw-r--r--drivers/pwm/pwm-twl-led.c49
-rw-r--r--drivers/pwm/pwm-visconti.c1
-rw-r--r--drivers/pwm/pwm-xilinx.c27
-rw-r--r--drivers/pwm/pwm_th1520.rs387
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c23
-rw-r--r--drivers/rapidio/rio-driver.c6
-rw-r--r--drivers/rapidio/rio-scan.c5
-rw-r--r--drivers/rapidio/rio-sysfs.c8
-rw-r--r--drivers/rapidio/rio.c103
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/rapidio/rio_cm.c9
-rw-r--r--drivers/ras/amd/atl/Kconfig5
-rw-r--r--drivers/ras/amd/atl/Makefile2
-rw-r--r--drivers/ras/amd/atl/access.c8
-rw-r--r--drivers/ras/amd/atl/core.c59
-rw-r--r--drivers/ras/amd/atl/dehash.c43
-rw-r--r--drivers/ras/amd/atl/denormalize.c561
-rw-r--r--drivers/ras/amd/atl/internal.h72
-rw-r--r--drivers/ras/amd/atl/map.c97
-rw-r--r--drivers/ras/amd/atl/prm.c53
-rw-r--r--drivers/ras/amd/atl/system.c53
-rw-r--r--drivers/ras/amd/atl/umc.c203
-rw-r--r--drivers/ras/amd/fmpm.c13
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/ras/ras.c41
-rw-r--r--drivers/regulator/88pm886-regulator.c392
-rw-r--r--drivers/regulator/Kconfig202
-rw-r--r--drivers/regulator/Makefile20
-rw-r--r--drivers/regulator/act8865-regulator.c6
-rw-r--r--drivers/regulator/ad5398.c30
-rw-r--r--drivers/regulator/adp5055-regulator.c424
-rw-r--r--drivers/regulator/arizona-ldo1.c12
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/axp20x-regulator.c74
-rw-r--r--drivers/regulator/bcm590xx-regulator.c1289
-rw-r--r--drivers/regulator/bd71815-regulator.c10
-rw-r--r--drivers/regulator/bd71828-regulator.c62
-rw-r--r--drivers/regulator/bd718x7-regulator.c54
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c2
-rw-r--r--drivers/regulator/bd9576-regulator.c8
-rw-r--r--drivers/regulator/bd96801-regulator.c1352
-rw-r--r--drivers/regulator/bq257xx-regulator.c186
-rw-r--r--drivers/regulator/core.c856
-rw-r--r--drivers/regulator/cros-ec-regulator.c4
-rw-r--r--drivers/regulator/da903x-regulator.c2
-rw-r--r--drivers/regulator/da9052-regulator.c22
-rw-r--r--drivers/regulator/da9055-regulator.c28
-rw-r--r--drivers/regulator/da9063-regulator.c4
-rw-r--r--drivers/regulator/da9121-regulator.c29
-rw-r--r--drivers/regulator/da9210-regulator.c4
-rw-r--r--drivers/regulator/da9211-regulator.c2
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/devres.c79
-rw-r--r--drivers/regulator/dummy.c37
-rw-r--r--drivers/regulator/fan53555.c16
-rw-r--r--drivers/regulator/fixed-helper.c2
-rw-r--r--drivers/regulator/fixed.c9
-rw-r--r--drivers/regulator/fp9931.c551
-rw-r--r--drivers/regulator/gpio-regulator.c18
-rw-r--r--drivers/regulator/helpers.c51
-rw-r--r--drivers/regulator/hi6421-regulator.c24
-rw-r--r--drivers/regulator/hi6421v530-regulator.c27
-rw-r--r--drivers/regulator/hi6421v600-regulator.c16
-rw-r--r--drivers/regulator/internal.h31
-rw-r--r--drivers/regulator/irq_helpers.c35
-rw-r--r--drivers/regulator/isl6271a-regulator.c4
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp8755.c2
-rw-r--r--drivers/regulator/max14577-regulator.c5
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max20086-regulator.c17
-rw-r--r--drivers/regulator/max20411-regulator.c5
-rw-r--r--drivers/regulator/max5970-regulator.c23
-rw-r--r--drivers/regulator/max77503-regulator.c8
-rw-r--r--drivers/regulator/max77650-regulator.c31
-rw-r--r--drivers/regulator/max77802-regulator.c4
-rw-r--r--drivers/regulator/max77826-regulator.c4
-rw-r--r--drivers/regulator/max77838-regulator.c221
-rw-r--r--drivers/regulator/max77857-regulator.c4
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8893.c4
-rw-r--r--drivers/regulator/max8952.c4
-rw-r--r--drivers/regulator/max8973-regulator.c7
-rw-r--r--drivers/regulator/max8997-regulator.c16
-rw-r--r--drivers/regulator/mcp16502.c19
-rw-r--r--drivers/regulator/mp5416.c4
-rw-r--r--drivers/regulator/mp886x.c3
-rw-r--r--drivers/regulator/mt6311-regulator.c4
-rw-r--r--drivers/regulator/mt6315-regulator.c6
-rw-r--r--drivers/regulator/mt6316-regulator.c345
-rw-r--r--drivers/regulator/mt6357-regulator.c2
-rw-r--r--drivers/regulator/mt6358-regulator.c2
-rw-r--r--drivers/regulator/mt6363-regulator.c938
-rw-r--r--drivers/regulator/mt6370-regulator.c4
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c282
-rw-r--r--drivers/regulator/of_regulator.c235
-rw-r--r--drivers/regulator/pca9450-regulator.c567
-rw-r--r--drivers/regulator/pcap-regulator.c12
-rw-r--r--drivers/regulator/pcf50633-regulator.c124
-rw-r--r--drivers/regulator/pf0900-regulator.c975
-rw-r--r--drivers/regulator/pf1550-regulator.c429
-rw-r--r--drivers/regulator/pf530x-regulator.c375
-rw-r--r--drivers/regulator/pf8x00-regulator.c8
-rw-r--r--drivers/regulator/pf9453-regulator.c870
-rw-r--r--drivers/regulator/pfuze100-regulator.c10
-rw-r--r--drivers/regulator/pv88060-regulator.c4
-rw-r--r--drivers/regulator/pv88090-regulator.c4
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c4
-rw-r--r--drivers/regulator/qcom-pm8008-regulator.c198
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c5
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c1348
-rw-r--r--drivers/regulator/qcom_smd-regulator.c15
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c81
-rw-r--r--drivers/regulator/renesas-usb-vbus-regulator.c69
-rw-r--r--drivers/regulator/rk808-regulator.c265
-rw-r--r--drivers/regulator/rohm-regulator.c4
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c81
-rw-r--r--drivers/regulator/rpi-panel-v2-regulator.c125
-rw-r--r--drivers/regulator/rt4831-regulator.c1
-rw-r--r--drivers/regulator/rt5120-regulator.c4
-rw-r--r--drivers/regulator/rt5133-regulator.c642
-rw-r--r--drivers/regulator/rt5739.c9
-rw-r--r--drivers/regulator/rt6160-regulator.c19
-rw-r--r--drivers/regulator/rtq2208-regulator.c220
-rw-r--r--drivers/regulator/rtq6752-regulator.c2
-rw-r--r--drivers/regulator/s2dos05-regulator.c165
-rw-r--r--drivers/regulator/s2mps11.c109
-rw-r--r--drivers/regulator/s5m8767.c163
-rw-r--r--drivers/regulator/scmi-regulator.c11
-rw-r--r--drivers/regulator/slg51000-regulator.c4
-rw-r--r--drivers/regulator/sm5703-regulator.c170
-rw-r--r--drivers/regulator/spacemit-p1.c157
-rw-r--r--drivers/regulator/stm32-pwr.c1
-rw-r--r--drivers/regulator/stm32-vrefbuf.c8
-rw-r--r--drivers/regulator/sy7636a-regulator.c34
-rw-r--r--drivers/regulator/sy8106a-regulator.c4
-rw-r--r--drivers/regulator/sy8824x.c5
-rw-r--r--drivers/regulator/sy8827n.c3
-rw-r--r--drivers/regulator/tps6286x-regulator.c20
-rw-r--r--drivers/regulator/tps6287x-regulator.c77
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/regulator/tps65219-regulator.c279
-rw-r--r--drivers/regulator/tps6524x-regulator.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c457
-rw-r--r--drivers/regulator/uniphier-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c8
-rw-r--r--drivers/regulator/virtual.c2
-rw-r--r--drivers/regulator/wm831x-isink.c8
-rw-r--r--drivers/regulator/wm8350-regulator.c6
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig37
-rw-r--r--drivers/remoteproc/Makefile5
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c96
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c543
-rw-r--r--drivers/remoteproc/imx_rproc.c723
-rw-r--r--drivers/remoteproc/imx_rproc.h21
-rw-r--r--drivers/remoteproc/ingenic_rproc.c3
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c133
-rw-r--r--drivers/remoteproc/meson_mx_ao_arc.c2
-rw-r--r--drivers/remoteproc/mtk_common.h11
-rw-r--r--drivers/remoteproc/mtk_scp.c337
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c7
-rw-r--r--drivers/remoteproc/omap_remoteproc.c76
-rw-r--r--drivers/remoteproc/pru_rproc.c9
-rw-r--r--drivers/remoteproc/qcom_common.c87
-rw-r--r--drivers/remoteproc/qcom_common.h10
-rw-r--r--drivers/remoteproc/qcom_q6v5.c8
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c64
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c314
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c834
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c159
-rw-r--r--drivers/remoteproc/qcom_sysmon.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss.c63
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c7
-rw-r--r--drivers/remoteproc/rcar_rproc.c40
-rw-r--r--drivers/remoteproc/remoteproc_core.c59
-rw-r--r--drivers/remoteproc/remoteproc_internal.h2
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c2
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c16
-rw-r--r--drivers/remoteproc/st_remoteproc.c100
-rw-r--r--drivers/remoteproc/st_slim_rproc.c8
-rw-r--r--drivers/remoteproc/stm32_rproc.c58
-rw-r--r--drivers/remoteproc/ti_k3_common.c542
-rw-r--r--drivers/remoteproc/ti_k3_common.h118
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c682
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c156
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c1154
-rw-r--r--drivers/remoteproc/ti_sci_proc.h26
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c69
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c744
-rw-r--r--drivers/resctrl/Kconfig24
-rw-r--r--drivers/resctrl/Makefile4
-rw-r--r--drivers/resctrl/mpam_devices.c2723
-rw-r--r--drivers/resctrl/mpam_internal.h658
-rw-r--r--drivers/resctrl/test_mpam_devices.c389
-rw-r--r--drivers/reset/Kconfig120
-rw-r--r--drivers/reset/Makefile18
-rw-r--r--drivers/reset/amlogic/Kconfig27
-rw-r--r--drivers/reset/amlogic/Makefile4
-rw-r--r--drivers/reset/amlogic/reset-meson-audio-arb.c (renamed from drivers/reset/reset-meson-audio-arb.c)13
-rw-r--r--drivers/reset/amlogic/reset-meson-aux.c81
-rw-r--r--drivers/reset/amlogic/reset-meson-common.c142
-rw-r--r--drivers/reset/amlogic/reset-meson.c105
-rw-r--r--drivers/reset/amlogic/reset-meson.h28
-rw-r--r--drivers/reset/core.c386
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c1
-rw-r--r--drivers/reset/reset-aspeed.c253
-rw-r--r--drivers/reset/reset-bcm6345.c1
-rw-r--r--drivers/reset/reset-berlin.c3
-rw-r--r--drivers/reset/reset-eic7700.c429
-rw-r--r--drivers/reset/reset-eyeq.c581
-rw-r--r--drivers/reset/reset-gpio.c19
-rw-r--r--drivers/reset/reset-imx-scu.c101
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c162
-rw-r--r--drivers/reset/reset-intel-gw.c1
-rw-r--r--drivers/reset/reset-k210.c3
-rw-r--r--drivers/reset/reset-k230.c371
-rw-r--r--drivers/reset/reset-lpc18xx.c43
-rw-r--r--drivers/reset/reset-meson.c153
-rw-r--r--drivers/reset/reset-microchip-sparx5.c47
-rw-r--r--drivers/reset/reset-mpfs.c133
-rw-r--r--drivers/reset/reset-npcm.c82
-rw-r--r--drivers/reset/reset-qcom-pdc.c1
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c130
-rw-r--r--drivers/reset/reset-rzv2h-usb2phy.c236
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/reset/reset-spacemit.c304
-rw-r--r--drivers/reset/reset-th1520.c983
-rw-r--r--drivers/reset/reset-ti-sci.c4
-rw-r--r--drivers/reset/reset-uniphier-glue.c24
-rw-r--r--drivers/reset/starfive/reset-starfive-jh71x0.c3
-rw-r--r--drivers/reset/sti/Kconfig4
-rw-r--r--drivers/reset/tegra/Kconfig3
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/qcom_glink_native.c216
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c2
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c1
-rw-r--r--drivers/rpmsg/qcom_glink_trace.h406
-rw-r--r--drivers/rpmsg/qcom_smd.c16
-rw-r--r--drivers/rpmsg/rpmsg_char.c10
-rw-r--r--drivers/rpmsg/rpmsg_core.c92
-rw-r--r--drivers/rpmsg/rpmsg_ctrl.c2
-rw-r--r--drivers/rpmsg/rpmsg_internal.h10
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c35
-rw-r--r--drivers/rtc/Kconfig165
-rw-r--r--drivers/rtc/Makefile15
-rw-r--r--drivers/rtc/class.c5
-rw-r--r--drivers/rtc/dev.c5
-rw-r--r--drivers/rtc/interface.c45
-rw-r--r--drivers/rtc/lib.c36
-rw-r--r--drivers/rtc/lib_test.c81
-rw-r--r--drivers/rtc/rtc-88pm80x.c4
-rw-r--r--drivers/rtc/rtc-88pm860x.c4
-rw-r--r--drivers/rtc/rtc-88pm886.c97
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c37
-rw-r--r--drivers/rtc/rtc-ab8500.c11
-rw-r--r--drivers/rtc/rtc-abx80x.c14
-rw-r--r--drivers/rtc/rtc-ac100.c2
-rw-r--r--drivers/rtc/rtc-amlogic-a4.c465
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-as3722.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c2
-rw-r--r--drivers/rtc/rtc-aspeed.c16
-rw-r--r--drivers/rtc/rtc-at91rm9200.c5
-rw-r--r--drivers/rtc/rtc-at91sam9.c5
-rw-r--r--drivers/rtc/rtc-bd70528.c5
-rw-r--r--drivers/rtc/rtc-bq32k.c2
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c3
-rw-r--r--drivers/rtc/rtc-cadence.c4
-rw-r--r--drivers/rtc/rtc-cmos.c52
-rw-r--r--drivers/rtc/rtc-cpcap.c3
-rw-r--r--drivers/rtc/rtc-cros-ec.c43
-rw-r--r--drivers/rtc/rtc-cv1800.c218
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/rtc/rtc-da9063.c31
-rw-r--r--drivers/rtc/rtc-ds1307.c41
-rw-r--r--drivers/rtc/rtc-ds1343.c8
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c6
-rw-r--r--drivers/rtc/rtc-ds2404.c14
-rw-r--r--drivers/rtc/rtc-ds3232.c28
-rw-r--r--drivers/rtc/rtc-efi.c76
-rw-r--r--drivers/rtc/rtc-em3027.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-fm3130.c2
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c2
-rw-r--r--drivers/rtc/rtc-ftrtc010.c19
-rw-r--r--drivers/rtc/rtc-goldfish.c1
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c4
-rw-r--r--drivers/rtc/rtc-hym8563.c19
-rw-r--r--drivers/rtc/rtc-imx-sm-bbm.c162
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-isl12022.c272
-rw-r--r--drivers/rtc/rtc-isl1208.c38
-rw-r--r--drivers/rtc/rtc-jz4740.c3
-rw-r--r--drivers/rtc/rtc-loongson.c27
-rw-r--r--drivers/rtc/rtc-lp8788.c2
-rw-r--r--drivers/rtc/rtc-lpc24xx.c2
-rw-r--r--drivers/rtc/rtc-lpc32xx.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c104
-rw-r--r--drivers/rtc/rtc-m48t59.c30
-rw-r--r--drivers/rtc/rtc-m48t86.c14
-rw-r--r--drivers/rtc/rtc-max31335.c179
-rw-r--r--drivers/rtc/rtc-max6900.c2
-rw-r--r--drivers/rtc/rtc-max77686.c41
-rw-r--r--drivers/rtc/rtc-max8925.c2
-rw-r--r--drivers/rtc/rtc-max8997.c2
-rw-r--r--drivers/rtc/rtc-mc13xxx.c15
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c6
-rw-r--r--drivers/rtc/rtc-mcp795.c1
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c14
-rw-r--r--drivers/rtc/rtc-meson.c17
-rw-r--r--drivers/rtc/rtc-mpc5121.c5
-rw-r--r--drivers/rtc/rtc-mpfs.c10
-rw-r--r--drivers/rtc/rtc-mt6397.c32
-rw-r--r--drivers/rtc/rtc-mt7622.c2
-rw-r--r--drivers/rtc/rtc-mv.c6
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-mxc_v2.c4
-rw-r--r--drivers/rtc/rtc-nct3018y.c32
-rw-r--r--drivers/rtc/rtc-nct6694.c297
-rw-r--r--drivers/rtc/rtc-nxp-bbnsm.c9
-rw-r--r--drivers/rtc/rtc-omap.c5
-rw-r--r--drivers/rtc/rtc-optee.c465
-rw-r--r--drivers/rtc/rtc-palmas.c4
-rw-r--r--drivers/rtc/rtc-pcf2127.c108
-rw-r--r--drivers/rtc/rtc-pcf50633.c284
-rw-r--r--drivers/rtc/rtc-pcf85063.c293
-rw-r--r--drivers/rtc/rtc-pcf8523.c2
-rw-r--r--drivers/rtc/rtc-pcf8563.c240
-rw-r--r--drivers/rtc/rtc-pcf8583.c2
-rw-r--r--drivers/rtc/rtc-pic32.c4
-rw-r--r--drivers/rtc/rtc-pl030.c16
-rw-r--r--drivers/rtc/rtc-pl031.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c228
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rc5t583.c5
-rw-r--r--drivers/rtc/rtc-rc5t619.c15
-rw-r--r--drivers/rtc/rtc-renesas-rtca3.c897
-rw-r--r--drivers/rtc/rtc-rk808.c2
-rw-r--r--drivers/rtc/rtc-rtd119x.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c21
-rw-r--r--drivers/rtc/rtc-rv3029c2.c4
-rw-r--r--drivers/rtc/rtc-rv3032.c29
-rw-r--r--drivers/rtc/rtc-rx6110.c6
-rw-r--r--drivers/rtc/rtc-rx8010.c2
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-rx8111.c368
-rw-r--r--drivers/rtc/rtc-rx8581.c87
-rw-r--r--drivers/rtc/rtc-rzn1.c269
-rw-r--r--drivers/rtc/rtc-s32g.c385
-rw-r--r--drivers/rtc/rtc-s35390a.c25
-rw-r--r--drivers/rtc/rtc-s3c.c56
-rw-r--r--drivers/rtc/rtc-s3c.h19
-rw-r--r--drivers/rtc/rtc-s5m.c227
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sc27xx.c4
-rw-r--r--drivers/rtc/rtc-sd2405al.c229
-rw-r--r--drivers/rtc/rtc-sd3078.c73
-rw-r--r--drivers/rtc/rtc-sh.c295
-rw-r--r--drivers/rtc/rtc-spacemit-p1.c167
-rw-r--r--drivers/rtc/rtc-spear.c6
-rw-r--r--drivers/rtc/rtc-st-lpc.c5
-rw-r--r--drivers/rtc/rtc-stm32.c394
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c3
-rw-r--r--drivers/rtc/rtc-sunplus.c6
-rw-r--r--drivers/rtc/rtc-tegra.c4
-rw-r--r--drivers/rtc/rtc-test.c8
-rw-r--r--drivers/rtc/rtc-tps6586x.c4
-rw-r--r--drivers/rtc/rtc-tps65910.c3
-rw-r--r--drivers/rtc/rtc-tps6594.c77
-rw-r--r--drivers/rtc/rtc-twl.c9
-rw-r--r--drivers/rtc/rtc-vt8500.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c4
-rw-r--r--drivers/rtc/rtc-x1205.c4
-rw-r--r--drivers/rtc/rtc-xgene.c6
-rw-r--r--drivers/rtc/rtc-zynqmp.c29
-rw-r--r--drivers/rtc/sysfs.c64
-rw-r--r--drivers/rtc/test_rtc_lib.c106
-rw-r--r--drivers/s390/block/Kconfig5
-rw-r--r--drivers/s390/block/dasd.c146
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_devmap.c19
-rw-r--r--drivers/s390/block/dasd_diag.c21
-rw-r--r--drivers/s390/block/dasd_eckd.c84
-rw-r--r--drivers/s390/block/dasd_eer.c1
-rw-r--r--drivers/s390/block/dasd_erp.c1
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/dasd_genhd.c83
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c9
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c92
-rw-r--r--drivers/s390/block/scm_blk.c9
-rw-r--r--drivers/s390/block/scm_drv.c3
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/con3215.c6
-rw-r--r--drivers/s390/char/con3270.c70
-rw-r--r--drivers/s390/char/diag_ftp.c7
-rw-r--r--drivers/s390/char/fs3270.c9
-rw-r--r--drivers/s390/char/hmcdrv_cache.c3
-rw-r--r--drivers/s390/char/hmcdrv_dev.c25
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c9
-rw-r--r--drivers/s390/char/hmcdrv_mod.c3
-rw-r--r--drivers/s390/char/keyboard.c1
-rw-r--r--drivers/s390/char/monreader.c6
-rw-r--r--drivers/s390/char/monwriter.c6
-rw-r--r--drivers/s390/char/raw3270.c5
-rw-r--r--drivers/s390/char/sclp.c44
-rw-r--r--drivers/s390/char/sclp.h45
-rw-r--r--drivers/s390/char/sclp_ap.c3
-rw-r--r--drivers/s390/char/sclp_cmd.c480
-rw-r--r--drivers/s390/char/sclp_con.c19
-rw-r--r--drivers/s390/char/sclp_config.c7
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c11
-rw-r--r--drivers/s390/char/sclp_ctl.c13
-rw-r--r--drivers/s390/char/sclp_early.c15
-rw-r--r--drivers/s390/char/sclp_early_core.c27
-rw-r--r--drivers/s390/char/sclp_ftp.c3
-rw-r--r--drivers/s390/char/sclp_mem.c521
-rw-r--r--drivers/s390/char/sclp_ocf.c8
-rw-r--r--drivers/s390/char/sclp_pci.c22
-rw-r--r--drivers/s390/char/sclp_sd.c35
-rw-r--r--drivers/s390/char/sclp_sdias.c3
-rw-r--r--drivers/s390/char/sclp_tty.c16
-rw-r--r--drivers/s390/char/sclp_vt220.c8
-rw-r--r--drivers/s390/char/tape.h21
-rw-r--r--drivers/s390/char/tape_34xx.c32
-rw-r--r--drivers/s390/char/tape_3590.c95
-rw-r--r--drivers/s390/char/tape_char.c143
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tape_core.c59
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c89
-rw-r--r--drivers/s390/char/tape_std.h9
-rw-r--r--drivers/s390/char/uvdevice.c154
-rw-r--r--drivers/s390/char/vmcp.c14
-rw-r--r--drivers/s390/char/vmlogrdr.c43
-rw-r--r--drivers/s390/char/vmur.c8
-rw-r--r--drivers/s390/char/zcore.c5
-rw-r--r--drivers/s390/cio/airq.c2
-rw-r--r--drivers/s390/cio/blacklist.c3
-rw-r--r--drivers/s390/cio/ccwgroup.c11
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/chp.c63
-rw-r--r--drivers/s390/cio/chp.h1
-rw-r--r--drivers/s390/cio/chsc.c45
-rw-r--r--drivers/s390/cio/chsc.h16
-rw-r--r--drivers/s390/cio/chsc_sch.c8
-rw-r--r--drivers/s390/cio/cio.c13
-rw-r--r--drivers/s390/cio/cio.h2
-rw-r--r--drivers/s390/cio/cio_inject.c3
-rw-r--r--drivers/s390/cio/cmf.c20
-rw-r--r--drivers/s390/cio/crw.c5
-rw-r--r--drivers/s390/cio/css.c14
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c84
-rw-r--r--drivers/s390/cio/device_fsm.c5
-rw-r--r--drivers/s390/cio/device_id.c3
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/device_status.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c5
-rw-r--r--drivers/s390/cio/fcx.c1
-rw-r--r--drivers/s390/cio/ioasm.c122
-rw-r--r--drivers/s390/cio/isc.c1
-rw-r--r--drivers/s390/cio/itcw.c1
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_debug.c1
-rw-r--r--drivers/s390/cio/qdio_main.c32
-rw-r--r--drivers/s390/cio/qdio_setup.c21
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/cio/scm.c3
-rw-r--r--drivers/s390/cio/trace.h2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c9
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c7
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c47
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h2
-rw-r--r--drivers/s390/crypto/Makefile20
-rw-r--r--drivers/s390/crypto/ap_bus.c362
-rw-r--r--drivers/s390/crypto/ap_bus.h41
-rw-r--r--drivers/s390/crypto/ap_card.c3
-rw-r--r--drivers/s390/crypto/ap_queue.c126
-rw-r--r--drivers/s390/crypto/pkey_api.c2671
-rw-r--r--drivers/s390/crypto/pkey_base.c380
-rw-r--r--drivers/s390/crypto/pkey_base.h240
-rw-r--r--drivers/s390/crypto/pkey_cca.c625
-rw-r--r--drivers/s390/crypto/pkey_ep11.c571
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c473
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c646
-rw-r--r--drivers/s390/crypto/pkey_uv.c317
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c13
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c233
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c456
-rw-r--r--drivers/s390/crypto/zcrypt_api.h16
-rw-r--r--drivers/s390/crypto/zcrypt_card.c2
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c494
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h52
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c39
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c486
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h39
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c49
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c149
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c2
-rw-r--r--drivers/s390/net/Kconfig26
-rw-r--r--drivers/s390/net/Makefile2
-rw-r--r--drivers/s390/net/ctcm_fsms.c17
-rw-r--r--drivers/s390/net/ctcm_main.c3
-rw-r--r--drivers/s390/net/ctcm_mpc.c7
-rw-r--r--drivers/s390/net/ctcm_sysfs.c3
-rw-r--r--drivers/s390/net/fsm.c7
-rw-r--r--drivers/s390/net/ism.h53
-rw-r--r--drivers/s390/net/ism_drv.c575
-rw-r--r--drivers/s390/net/lcs.c2384
-rw-r--r--drivers/s390/net/lcs.h342
-rw-r--r--drivers/s390/net/netiucv.c2095
-rw-r--r--drivers/s390/net/qeth_core_main.c23
-rw-r--r--drivers/s390/net/qeth_core_mpc.c247
-rw-r--r--drivers/s390/net/qeth_core_mpc.h20
-rw-r--r--drivers/s390/net/qeth_core_sys.c25
-rw-r--r--drivers/s390/net/qeth_ethtool.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/s390/net/smsgiucv.c4
-rw-r--r--drivers/s390/net/smsgiucv_app.c34
-rw-r--r--drivers/s390/scsi/zfcp_aux.c19
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c3
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c3
-rw-r--r--drivers/s390/scsi/zfcp_erp.c7
-rw-r--r--drivers/s390/scsi/zfcp_fc.c10
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c13
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c8
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c18
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c89
-rw-r--r--drivers/s390/scsi/zfcp_unit.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c37
-rw-r--r--drivers/sbus/char/bbc_i2c.c3
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/sbus/char/envctrl.c3
-rw-r--r--drivers/sbus/char/flash.c3
-rw-r--r--drivers/sbus/char/openprom.c1
-rw-r--r--drivers/sbus/char/uctrl.c4
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/3w-sas.c19
-rw-r--r--drivers/scsi/3w-xxxx.c12
-rw-r--r--drivers/scsi/53c700.c19
-rw-r--r--drivers/scsi/BusLogic.c18
-rw-r--r--drivers/scsi/BusLogic.h5
-rw-r--r--drivers/scsi/Kconfig9
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c233
-rw-r--r--drivers/scsi/NCR5380.h20
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/a3000.c6
-rw-r--r--drivers/scsi/a4000t.c6
-rw-r--r--drivers/scsi/aacraid/aachba.c34
-rw-r--r--drivers/scsi/aacraid/aacraid.h23
-rw-r--r--drivers/scsi/aacraid/commctrl.c4
-rw-r--r--drivers/scsi/aacraid/comminit.c8
-rw-r--r--drivers/scsi/aacraid/commsup.c136
-rw-r--r--drivers/scsi/aacraid/linit.c18
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/advansys.c31
-rw-r--r--drivers/scsi/aha152x.c9
-rw-r--r--drivers/scsi/aha1542.c4
-rw-r--r--drivers/scsi/aha1740.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c15
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c12
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c12
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c5
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c10
-rw-r--r--drivers/scsi/am53c974.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c46
-rw-r--r--drivers/scsi/arm/acornscsi.c11
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/eesox.c2
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/atari_scsi.c3
-rw-r--r--drivers/scsi/atp870u.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.c17
-rw-r--r--drivers/scsi/bfa/bfa.h10
-rw-r--r--drivers/scsi/bfa/bfa_core.c36
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h22
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c482
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h72
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c9
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h12
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c142
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c36
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c21
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h2
-rw-r--r--drivers/scsi/bfa/bfa_modules.h1
-rw-r--r--drivers/scsi/bfa/bfa_svc.c72
-rw-r--r--drivers/scsi/bfa/bfa_svc.h5
-rw-r--r--drivers/scsi/bfa/bfad.c39
-rw-r--r--drivers/scsi/bfa/bfad_attr.c28
-rw-r--r--drivers/scsi/bfa/bfad_drv.h1
-rw-r--r--drivers/scsi/bfa/bfad_im.c32
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bfa/bfi.h2
-rw-r--r--drivers/scsi/bnx2fc/Kconfig1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c26
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c8
-rw-r--r--drivers/scsi/bnx2i/Kconfig1
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h11
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c7
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c8
-rw-r--r--drivers/scsi/bvme6000_scsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c8
-rw-r--r--drivers/scsi/csiostor/csio_init.c3
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2
-rw-r--r--drivers/scsi/csiostor/csio_mb.c4
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c22
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c4
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/cxlflash/Kconfig13
-rw-r--r--drivers/scsi/cxlflash/Makefile5
-rw-r--r--drivers/scsi/cxlflash/backend.h48
-rw-r--r--drivers/scsi/cxlflash/common.h340
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c177
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c278
-rw-r--r--drivers/scsi/cxlflash/main.c3968
-rw-r--r--drivers/scsi/cxlflash/main.h129
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1399
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.h72
-rw-r--r--drivers/scsi/cxlflash/sislite.h560
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2218
-rw-r--r--drivers/scsi/cxlflash/superpipe.h150
-rw-r--r--drivers/scsi/cxlflash/vlun.c1336
-rw-r--r--drivers/scsi/cxlflash/vlun.h82
-rw-r--r--drivers/scsi/dc395x.c725
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c35
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c3
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c7
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c5
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c5
-rw-r--r--drivers/scsi/elx/efct/efct_xport.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_node.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_nport.c2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c6
-rw-r--r--drivers/scsi/esas2r/esas2r.h17
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c7
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c28
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c17
-rw-r--r--drivers/scsi/esp_scsi.c14
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c32
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c6
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c20
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c2
-rw-r--r--drivers/scsi/fdomain.c4
-rw-r--r--drivers/scsi/fdomain_pci.c2
-rw-r--r--drivers/scsi/fnic/Makefile5
-rw-r--r--drivers/scsi/fnic/fdls_disc.c5093
-rw-r--r--drivers/scsi/fnic/fdls_fc.h253
-rw-r--r--drivers/scsi/fnic/fip.c1005
-rw-r--r--drivers/scsi/fnic/fip.h159
-rw-r--r--drivers/scsi/fnic/fnic.h288
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c12
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c11
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c1744
-rw-r--r--drivers/scsi/fnic/fnic_fdls.h435
-rw-r--r--drivers/scsi/fnic/fnic_fip.h48
-rw-r--r--drivers/scsi/fnic/fnic_io.h14
-rw-r--r--drivers/scsi/fnic/fnic_isr.c28
-rw-r--r--drivers/scsi/fnic/fnic_main.c766
-rw-r--r--drivers/scsi/fnic/fnic_pci_subsys_devid.c131
-rw-r--r--drivers/scsi/fnic/fnic_res.c78
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c1161
-rw-r--r--drivers/scsi/fnic/fnic_stats.h49
-rw-r--r--drivers/scsi/fnic/fnic_trace.c154
-rw-r--r--drivers/scsi/g_NCR5380.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h61
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c184
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c30
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c76
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c492
-rw-r--r--drivers/scsi/hosts.c51
-rw-r--r--drivers/scsi/hpsa.c96
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c59
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c17
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c23
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.c6
-rw-r--r--drivers/scsi/imm.c5
-rw-r--r--drivers/scsi/initio.c6
-rw-r--r--drivers/scsi/ipr.c77
-rw-r--r--drivers/scsi/ipr.h6
-rw-r--r--drivers/scsi/ips.c16
-rw-r--r--drivers/scsi/ips.h5
-rw-r--r--drivers/scsi/isci/host.c18
-rw-r--r--drivers/scsi/isci/init.c21
-rw-r--r--drivers/scsi/isci/isci.h15
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/port.c2
-rw-r--r--drivers/scsi/isci/port_config.c4
-rw-r--r--drivers/scsi/isci/remote_device.c61
-rw-r--r--drivers/scsi/isci/remote_device.h34
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/isci/task.h10
-rw-r--r--drivers/scsi/iscsi_tcp.c68
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_encode.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c3
-rw-r--r--drivers/scsi/libfc/fc_fcp.c16
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c5
-rw-r--r--drivers/scsi/libiscsi.c16
-rw-r--r--drivers/scsi/libiscsi_tcp.c91
-rw-r--r--drivers/scsi/libsas/sas_ata.c43
-rw-r--r--drivers/scsi/libsas/sas_discover.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c9
-rw-r--r--drivers/scsi/libsas/sas_init.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h92
-rw-r--r--drivers/scsi/libsas/sas_phy.c6
-rw-r--r--drivers/scsi/libsas/sas_port.c13
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c23
-rw-r--r--drivers/scsi/lpfc/lpfc.h92
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c227
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c122
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c670
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h16
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h77
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c1020
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c418
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h50
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h114
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c302
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c442
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c84
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c126
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c436
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c67
-rw-r--r--drivers/scsi/mac_esp.c2
-rw-r--r--drivers/scsi/mac_scsi.c173
-rw-r--r--drivers/scsi/megaraid.c16
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c90
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c11
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h17
-rw-r--r--drivers/scsi/mesh.c1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h87
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h21
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h11
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h39
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_tool.h45
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h22
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h205
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c1314
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c719
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c619
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c102
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h9
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h54
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c95
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c79
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c281
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c96
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c2
-rw-r--r--drivers/scsi/mvme16x_scsi.c2
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h4
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/mvsas/mv_init.c14
-rw-r--r--drivers/scsi/mvsas/mv_sas.c20
-rw-r--r--drivers/scsi/mvsas/mv_sas.h3
-rw-r--r--drivers/scsi/mvumi.c7
-rw-r--r--drivers/scsi/myrb.c34
-rw-r--r--drivers/scsi/myrb.h1
-rw-r--r--drivers/scsi/myrs.c28
-rw-r--r--drivers/scsi/myrs.h1
-rw-r--r--drivers/scsi/ncr53c8xx.c11
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c1
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c26
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h9
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c16
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c23
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c130
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h20
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c140
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h4
-rw-r--r--drivers/scsi/pmcraid.c42
-rw-r--r--drivers/scsi/ppa.c5
-rw-r--r--drivers/scsi/ps3rom.c5
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedf/qedf_attr.c6
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_io.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c77
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c22
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h12
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c27
-rw-r--r--drivers/scsi/qla1280.c51
-rw-r--r--drivers/scsi/qla1280.h12
-rw-r--r--drivers/scsi/qla2xxx/Kconfig6
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c184
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c144
-rw-r--r--drivers/scsi/qla2xxx/qla_dsd.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c557
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c135
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c48
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c131
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c112
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c1894
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h115
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c34
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/qlogicpti.c7
-rw-r--r--drivers/scsi/scsi.c103
-rw-r--r--drivers/scsi/scsi_common.c3
-rw-r--r--drivers/scsi/scsi_debug.c1976
-rw-r--r--drivers/scsi/scsi_devinfo.c51
-rw-r--r--drivers/scsi/scsi_error.c53
-rw-r--r--drivers/scsi/scsi_ioctl.c37
-rw-r--r--drivers/scsi/scsi_lib.c208
-rw-r--r--drivers/scsi/scsi_lib_test.c7
-rw-r--r--drivers/scsi/scsi_logging.c21
-rw-r--r--drivers/scsi/scsi_pm.c1
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_proto_test.c2
-rw-r--r--drivers/scsi/scsi_scan.c138
-rw-r--r--drivers/scsi/scsi_sysctl.c6
-rw-r--r--drivers/scsi/scsi_sysfs.c103
-rw-r--r--drivers/scsi/scsi_trace.c24
-rw-r--r--drivers/scsi/scsi_transport_fc.c86
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c65
-rw-r--r--drivers/scsi/scsi_transport_sas.c93
-rw-r--r--drivers/scsi/scsi_transport_spi.c3
-rw-r--r--drivers/scsi/scsi_transport_srp.c2
-rw-r--r--drivers/scsi/scsicam.c18
-rw-r--r--drivers/scsi/sd.c557
-rw-r--r--drivers/scsi/sd.h33
-rw-r--r--drivers/scsi/sd_dif.c46
-rw-r--r--drivers/scsi/sd_zbc.c81
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/sg.c30
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/sim710.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h39
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c736
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c62
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h3
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/snic/snic_debugfs.c10
-rw-r--r--drivers/scsi/snic/snic_main.c24
-rw-r--r--drivers/scsi/snic/snic_trc.c5
-rw-r--r--drivers/scsi/sr.c45
-rw-r--r--drivers/scsi/sr.h2
-rw-r--r--drivers/scsi/sr_ioctl.c5
-rw-r--r--drivers/scsi/st.c218
-rw-r--r--drivers/scsi/st.h7
-rw-r--r--drivers/scsi/stex.c16
-rw-r--r--drivers/scsi/storvsc_drv.c153
-rw-r--r--drivers/scsi/sun3_scsi.c13
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/sun_esp.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c21
-rw-r--r--drivers/scsi/virtio_scsi.c39
-rw-r--r--drivers/scsi/vmw_pvscsi.c5
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/scsi/xen-scsifront.c11
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/sh/clk/core.c10
-rw-r--r--drivers/sh/clk/cpg.c25
-rw-r--r--drivers/sh/intc/core.c14
-rw-r--r--drivers/sh/intc/irqdomain.c5
-rw-r--r--drivers/sh/intc/userimask.c5
-rw-r--r--drivers/sh/intc/virq-debugfs.c1
-rw-r--r--drivers/sh/maple/maple.c4
-rw-r--r--drivers/siox/siox-bus-gpio.c4
-rw-r--r--drivers/siox/siox-core.c2
-rw-r--r--drivers/slimbus/Kconfig7
-rw-r--r--drivers/slimbus/Makefile3
-rw-r--r--drivers/slimbus/core.c21
-rw-r--r--drivers/slimbus/messaging.c20
-rw-r--r--drivers/slimbus/qcom-ctrl.c734
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c42
-rw-r--r--drivers/slimbus/stream.c8
-rw-r--r--drivers/soc/Kconfig3
-rw-r--r--drivers/soc/Makefile5
-rw-r--r--drivers/soc/amlogic/meson-canvas.c12
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c462
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c18
-rw-r--r--drivers/soc/apple/Kconfig3
-rw-r--r--drivers/soc/apple/mailbox.c34
-rw-r--r--drivers/soc/apple/rtkit-internal.h1
-rw-r--r--drivers/soc/apple/rtkit.c115
-rw-r--r--drivers/soc/apple/sart.c73
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c16
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c229
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c16
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c4
-rw-r--r--drivers/soc/aspeed/aspeed-uart-routing.c2
-rw-r--r--drivers/soc/atmel/soc.c30
-rw-r--r--drivers/soc/atmel/soc.h12
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c12
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm.h2
-rw-r--r--drivers/soc/cirrus/Kconfig17
-rw-r--r--drivers/soc/cirrus/Makefile2
-rw-r--r--drivers/soc/cirrus/soc-ep93xx.c252
-rw-r--r--drivers/soc/dove/pmu.c7
-rw-r--r--drivers/soc/fsl/Kconfig4
-rw-r--r--drivers/soc/fsl/dpaa2-console.c2
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/qbman/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/qman.c9
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c5
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c10
-rw-r--r--drivers/soc/fsl/qe/Kconfig17
-rw-r--r--drivers/soc/fsl/qe/gpio.c149
-rw-r--r--drivers/soc/fsl/qe/qe_common.c80
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c24
-rw-r--r--drivers/soc/fsl/qe/qmc.c748
-rw-r--r--drivers/soc/fsl/qe/tsa.c689
-rw-r--r--drivers/soc/fsl/qe/tsa.h3
-rw-r--r--drivers/soc/fsl/qe/ucc.c1
-rw-r--r--drivers/soc/fsl/rcpm.c1
-rw-r--r--drivers/soc/fujitsu/a64fx-diag.c2
-rw-r--r--drivers/soc/hisilicon/Kconfig7
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c532
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h35
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/soc-imx8m.c294
-rw-r--r--drivers/soc/imx/soc-imx9.c128
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c3
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c3
-rw-r--r--drivers/soc/litex/Kconfig2
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c27
-rw-r--r--drivers/soc/loongson/loongson2_guts.c7
-rw-r--r--drivers/soc/mediatek/Kconfig11
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mt8167-mmsys.h31
-rw-r--r--drivers/soc/mediatek/mt8173-mmsys.h99
-rw-r--r--drivers/soc/mediatek/mt8183-mmsys.h50
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h88
-rw-r--r--drivers/soc/mediatek/mt8188-mmsys.h266
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h71
-rw-r--r--drivers/soc/mediatek/mt8195-mmsys.h632
-rw-r--r--drivers/soc/mediatek/mt8365-mmsys.h84
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c273
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c21
-rw-r--r--drivers/soc/mediatek/mtk-dvfsrc.c578
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c3
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h14
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c168
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c120
-rw-r--r--drivers/soc/mediatek/mtk-regulator-coupler.c1
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c27
-rw-r--r--drivers/soc/mediatek/mtk-svs.c27
-rw-r--r--drivers/soc/microchip/Kconfig12
-rw-r--r--drivers/soc/microchip/Makefile1
-rw-r--r--drivers/soc/microchip/mpfs-control-scb.c38
-rw-r--r--drivers/soc/microchip/mpfs-mss-top-sysreg.c44
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c2
-rw-r--r--drivers/soc/pxa/ssp.c2
-rw-r--r--drivers/soc/qcom/Kconfig28
-rw-r--r--drivers/soc/qcom/Makefile4
-rw-r--r--drivers/soc/qcom/apr.c9
-rw-r--r--drivers/soc/qcom/cmd-db.c2
-rw-r--r--drivers/soc/qcom/icc-bwmon.c27
-rw-r--r--drivers/soc/qcom/ice.c454
-rw-r--r--drivers/soc/qcom/llcc-qcom.c4222
-rw-r--r--drivers/soc/qcom/mdt_loader.c129
-rw-r--r--drivers/soc/qcom/ocmem.c21
-rw-r--r--drivers/soc/qcom/pdr_interface.c54
-rw-r--r--drivers/soc/qcom/pdr_internal.h317
-rw-r--r--drivers/soc/qcom/pmic_glink.c135
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c60
-rw-r--r--drivers/soc/qcom/pmic_pdcharger_ulog.h2
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c522
-rw-r--r--drivers/soc/qcom/qcom-pbs.c40
-rw-r--r--drivers/soc/qcom/qcom_aoss.c13
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c8
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c731
-rw-r--r--drivers/soc/qcom/qcom_pdr_msg.c352
-rw-r--r--drivers/soc/qcom/qcom_stats.c135
-rw-r--r--drivers/soc/qcom/qmi_encdec.c52
-rw-r--r--drivers/soc/qcom/qmi_interface.c8
-rw-r--r--drivers/soc/qcom/ramp_controller.c5
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c4
-rw-r--r--drivers/soc/qcom/rpm-proc.c2
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c4
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c25
-rw-r--r--drivers/soc/qcom/rpmh.c1
-rw-r--r--drivers/soc/qcom/smd-rpm.c41
-rw-r--r--drivers/soc/qcom/smem.c114
-rw-r--r--drivers/soc/qcom/smem_state.c15
-rw-r--r--drivers/soc/qcom/smp2p.c51
-rw-r--r--drivers/soc/qcom/smsm.c59
-rw-r--r--drivers/soc/qcom/socinfo.c152
-rw-r--r--drivers/soc/qcom/spm.c1
-rw-r--r--drivers/soc/qcom/trace-aoss.h4
-rw-r--r--drivers/soc/qcom/trace-rpmh.h4
-rw-r--r--drivers/soc/qcom/trace-smp2p.h98
-rw-r--r--drivers/soc/qcom/trace_icc-bwmon.h48
-rw-r--r--drivers/soc/qcom/ubwc_config.c317
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c11
-rw-r--r--drivers/soc/renesas/Kconfig388
-rw-r--r--drivers/soc/renesas/Makefile5
-rw-r--r--drivers/soc/renesas/pwc-rzv2m.c6
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c93
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c147
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c144
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c169
-rw-r--r--drivers/soc/renesas/rcar-rst.c3
-rw-r--r--drivers/soc/renesas/renesas-soc.c49
-rw-r--r--drivers/soc/renesas/rz-sysc.c169
-rw-r--r--drivers/soc/renesas/rz-sysc.h53
-rw-r--r--drivers/soc/rockchip/grf.c74
-rw-r--r--drivers/soc/rockchip/io-domain.c48
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-asv.c1
-rw-r--r--drivers/soc/samsung/exynos-chipid.c30
-rw-r--r--drivers/soc/samsung/exynos-pmu.c456
-rw-r--r--drivers/soc/samsung/exynos-pmu.h38
-rw-r--r--drivers/soc/samsung/exynos-usi.c108
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c1
-rw-r--r--drivers/soc/samsung/gs101-pmu.c446
-rw-r--r--drivers/soc/sophgo/Kconfig34
-rw-r--r--drivers/soc/sophgo/Makefile4
-rw-r--r--drivers/soc/sophgo/cv1800-rtcsys.c63
-rw-r--r--drivers/soc/sophgo/sg2044-topsys.c45
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c18
-rw-r--r--drivers/soc/tegra/Kconfig18
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c20
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c38
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c760
-rw-r--r--drivers/soc/tegra/common.c12
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c6
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c139
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c63
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c1
-rw-r--r--drivers/soc/tegra/pmc.c259
-rw-r--r--drivers/soc/ti/k3-ringacc.c16
-rw-r--r--drivers/soc/ti/k3-socinfo.c25
-rw-r--r--drivers/soc/ti/knav_dma.c40
-rw-r--r--drivers/soc/ti/knav_qmss.h2
-rw-r--r--drivers/soc/ti/knav_qmss_acc.c2
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c118
-rw-r--r--drivers/soc/ti/pm33xx.c60
-rw-r--r--drivers/soc/ti/pruss.c180
-rw-r--r--drivers/soc/ti/smartreflex.c6
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c10
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c10
-rw-r--r--drivers/soc/versatile/Kconfig4
-rw-r--r--drivers/soc/versatile/soc-integrator.c1
-rw-r--r--drivers/soc/versatile/soc-realview.c20
-rw-r--r--drivers/soc/vt8500/Kconfig19
-rw-r--r--drivers/soc/vt8500/Makefile2
-rw-r--r--drivers/soc/vt8500/wmt-socinfo.c125
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c30
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c159
-rw-r--r--drivers/soundwire/Kconfig2
-rw-r--r--drivers/soundwire/amd_init.c62
-rw-r--r--drivers/soundwire/amd_init.h8
-rw-r--r--drivers/soundwire/amd_manager.c296
-rw-r--r--drivers/soundwire/amd_manager.h42
-rw-r--r--drivers/soundwire/bus.c189
-rw-r--r--drivers/soundwire/bus.h21
-rw-r--r--drivers/soundwire/bus_type.c46
-rw-r--r--drivers/soundwire/cadence_master.c778
-rw-r--r--drivers/soundwire/cadence_master.h26
-rw-r--r--drivers/soundwire/debugfs.c212
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c384
-rw-r--r--drivers/soundwire/intel.c95
-rw-r--r--drivers/soundwire/intel.h46
-rw-r--r--drivers/soundwire/intel_ace2x.c517
-rw-r--r--drivers/soundwire/intel_ace2x_debugfs.c8
-rw-r--r--drivers/soundwire/intel_auxdevice.c154
-rw-r--r--drivers/soundwire/intel_auxdevice.h1
-rw-r--r--drivers/soundwire/intel_bus_common.c35
-rw-r--r--drivers/soundwire/intel_init.c38
-rw-r--r--drivers/soundwire/irq.c16
-rw-r--r--drivers/soundwire/irq.h5
-rw-r--r--drivers/soundwire/mipi_disco.c218
-rw-r--r--drivers/soundwire/qcom.c65
-rw-r--r--drivers/soundwire/slave.c34
-rw-r--r--drivers/soundwire/stream.c221
-rw-r--r--drivers/soundwire/sysfs_local.h4
-rw-r--r--drivers/soundwire/sysfs_slave.c66
-rw-r--r--drivers/soundwire/sysfs_slave_dpn.c3
-rw-r--r--drivers/spi/Kconfig177
-rw-r--r--drivers/spi/Makefile24
-rw-r--r--drivers/spi/atmel-quadspi.c1142
-rw-r--r--drivers/spi/internals.h8
-rw-r--r--drivers/spi/spi-airoha-snfi.c711
-rw-r--r--drivers/spi/spi-altera-core.c1
-rw-r--r--drivers/spi/spi-altera-platform.c1
-rw-r--r--drivers/spi/spi-amd-pci.c69
-rw-r--r--drivers/spi/spi-amd.c516
-rw-r--r--drivers/spi/spi-amd.h44
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c11
-rw-r--r--drivers/spi/spi-amlogic-spifc-a4.c1222
-rw-r--r--drivers/spi/spi-amlogic-spisg.c888
-rw-r--r--drivers/spi/spi-apple.c531
-rw-r--r--drivers/spi/spi-ar934x.c2
-rw-r--r--drivers/spi/spi-aspeed-smc.c766
-rw-r--r--drivers/spi/spi-at91-usart.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--  drivers/spi/spi-atmel.c | 80
-rw-r--r--  drivers/spi/spi-au1550.c | 2
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c | 495
-rw-r--r--  drivers/spi/spi-bcm2835.c | 22
-rw-r--r--  drivers/spi/spi-bcm2835aux.c | 2
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c | 4
-rw-r--r--  drivers/spi/spi-bcm63xx.c | 32
-rw-r--r--  drivers/spi/spi-bcmbca-hsspi.c | 27
-rw-r--r--  drivers/spi/spi-bitbang.c | 97
-rw-r--r--  drivers/spi/spi-brcmstb-qspi.c | 2
-rw-r--r--  drivers/spi/spi-butterfly.c | 1
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c | 222
-rw-r--r--  drivers/spi/spi-cadence-xspi.c | 712
-rw-r--r--  drivers/spi/spi-cadence.c | 144
-rw-r--r--  drivers/spi/spi-cavium-octeon.c | 2
-rw-r--r--  drivers/spi/spi-cavium-thunderx.c | 4
-rw-r--r--  drivers/spi/spi-ch341.c | 241
-rw-r--r--  drivers/spi/spi-coldfire-qspi.c | 2
-rw-r--r--  drivers/spi/spi-cs42l43.c | 177
-rw-r--r--  drivers/spi/spi-davinci.c | 80
-rw-r--r--  drivers/spi/spi-dln2.c | 4
-rw-r--r--  drivers/spi/spi-dw-bt1.c | 18
-rw-r--r--  drivers/spi/spi-dw-core.c | 204
-rw-r--r--  drivers/spi/spi-dw-dma.c | 26
-rw-r--r--  drivers/spi/spi-dw-mmio.c | 17
-rw-r--r--  drivers/spi/spi-dw-pci.c | 19
-rw-r--r--  drivers/spi/spi-dw.h | 12
-rw-r--r--  drivers/spi/spi-ep93xx.c | 70
-rw-r--r--  drivers/spi/spi-falcon.c | 5
-rw-r--r--  drivers/spi/spi-fsi.c | 13
-rw-r--r--  drivers/spi/spi-fsl-cpm.c | 1
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 638
-rw-r--r--  drivers/spi/spi-fsl-espi.c | 4
-rw-r--r--  drivers/spi/spi-fsl-lib.c | 1
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c | 128
-rw-r--r--  drivers/spi/spi-fsl-qspi.c | 210
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 6
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 88
-rw-r--r--  drivers/spi/spi-gpio.c | 139
-rw-r--r--  drivers/spi/spi-hisi-kunpeng.c | 6
-rw-r--r--  drivers/spi/spi-hisi-sfc-v3xx.c | 2
-rw-r--r--  drivers/spi/spi-img-spfi.c | 2
-rw-r--r--  drivers/spi/spi-imx.c | 230
-rw-r--r--  drivers/spi/spi-ingenic.c | 4
-rw-r--r--  drivers/spi/spi-intel-pci.c | 14
-rw-r--r--  drivers/spi/spi-intel-platform.c | 10
-rw-r--r--  drivers/spi/spi-intel.c | 93
-rw-r--r--  drivers/spi/spi-intel.h | 4
-rw-r--r--  drivers/spi/spi-iproc-qspi.c | 2
-rw-r--r--  drivers/spi/spi-kspi2.c | 431
-rw-r--r--  drivers/spi/spi-lantiq-ssc.c | 4
-rw-r--r--  drivers/spi/spi-ljca.c | 4
-rw-r--r--  drivers/spi/spi-lm70llp.c | 1
-rw-r--r--  drivers/spi/spi-loongson-core.c | 5
-rw-r--r--  drivers/spi/spi-loongson-pci.c | 7
-rw-r--r--  drivers/spi/spi-loongson-plat.c | 2
-rw-r--r--  drivers/spi/spi-loopback-test.c | 22
-rw-r--r--  drivers/spi/spi-mem.c | 108
-rw-r--r--  drivers/spi/spi-meson-spicc.c | 287
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 2
-rw-r--r--  drivers/spi/spi-microchip-core-qspi.c | 237
-rw-r--r--  drivers/spi/spi-microchip-core-spi.c | 429
-rw-r--r--  drivers/spi/spi-microchip-core.c | 597
-rw-r--r--  drivers/spi/spi-mpc52xx-psc.c | 4
-rw-r--r--  drivers/spi/spi-mpc52xx.c | 3
-rw-r--r--  drivers/spi/spi-mpfs.c | 626
-rw-r--r--  drivers/spi/spi-mt65xx.c | 107
-rw-r--r--  drivers/spi/spi-mtk-nor.c | 3
-rw-r--r--  drivers/spi/spi-mtk-snfi.c | 10
-rw-r--r--  drivers/spi/spi-mux.c | 6
-rw-r--r--  drivers/spi/spi-mxic.c | 34
-rw-r--r--  drivers/spi/spi-mxs.c | 17
-rw-r--r--  drivers/spi/spi-npcm-fiu.c | 19
-rw-r--r--  drivers/spi/spi-npcm-pspi.c | 4
-rw-r--r--  drivers/spi/spi-nxp-fspi.c | 403
-rw-r--r--  drivers/spi/spi-oc-tiny.c | 2
-rw-r--r--  drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c | 62
-rw-r--r--  drivers/spi/spi-offload-trigger-pwm.c | 172
-rw-r--r--  drivers/spi/spi-offload.c | 465
-rw-r--r--  drivers/spi/spi-omap-uwire.c | 3
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 65
-rw-r--r--  drivers/spi/spi-orion.c | 4
-rw-r--r--  drivers/spi/spi-pci1xxxx.c | 346
-rw-r--r--  drivers/spi/spi-pic32-sqi.c | 4
-rw-r--r--  drivers/spi/spi-pic32.c | 2
-rw-r--r--  drivers/spi/spi-pl022.c | 15
-rw-r--r--  drivers/spi/spi-ppc4xx.c | 24
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c | 52
-rw-r--r--  drivers/spi/spi-pxa2xx-platform.c | 230
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 349
-rw-r--r--  drivers/spi/spi-pxa2xx.h | 7
-rw-r--r--  drivers/spi/spi-qcom-qspi.c | 4
-rw-r--r--  drivers/spi/spi-qpic-snand.c | 1653
-rw-r--r--  drivers/spi/spi-qup.c | 12
-rw-r--r--  drivers/spi/spi-rb4xx.c | 36
-rw-r--r--  drivers/spi/spi-realtek-rtl-snand.c | 418
-rw-r--r--  drivers/spi/spi-rockchip-sfc.c | 267
-rw-r--r--  drivers/spi/spi-rockchip.c | 98
-rw-r--r--  drivers/spi/spi-rpc-if.c | 49
-rw-r--r--  drivers/spi/spi-rspi.c | 11
-rw-r--r--  drivers/spi/spi-rzv2h-rspi.c | 687
-rw-r--r--  drivers/spi/spi-rzv2m-csi.c | 2
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 35
-rw-r--r--  drivers/spi/spi-sc18is602.c | 34
-rw-r--r--  drivers/spi/spi-sg2044-nor.c | 511
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 2
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 412
-rw-r--r--  drivers/spi/spi-sh-sci.c | 2
-rw-r--r--  drivers/spi/spi-sh.c | 2
-rw-r--r--  drivers/spi/spi-sifive.c | 2
-rw-r--r--  drivers/spi/spi-slave-mt27xx.c | 22
-rw-r--r--  drivers/spi/spi-slave-system-control.c | 2
-rw-r--r--  drivers/spi/spi-slave-time.c | 2
-rw-r--r--  drivers/spi/spi-sn-f-ospi.c | 13
-rw-r--r--  drivers/spi/spi-sprd.c | 5
-rw-r--r--  drivers/spi/spi-st-ssc4.c | 16
-rw-r--r--  drivers/spi/spi-stm32-ospi.c | 1068
-rw-r--r--  drivers/spi/spi-stm32-qspi.c | 26
-rw-r--r--  drivers/spi/spi-stm32.c | 337
-rw-r--r--  drivers/spi/spi-sun4i.c | 8
-rw-r--r--  drivers/spi/spi-sun6i.c | 2
-rw-r--r--  drivers/spi/spi-sunplus-sp7021.c | 8
-rw-r--r--  drivers/spi/spi-synquacer.c | 2
-rw-r--r--  drivers/spi/spi-tegra114.c | 8
-rw-r--r--  drivers/spi/spi-tegra20-sflash.c | 2
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 4
-rw-r--r--  drivers/spi/spi-tegra210-quad.c | 444
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 26
-rw-r--r--  drivers/spi/spi-tle62x0.c | 2
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 2
-rw-r--r--  drivers/spi/spi-uniphier.c | 4
-rw-r--r--  drivers/spi/spi-virtio.c | 431
-rw-r--r--  drivers/spi/spi-wpcm-fiu.c | 23
-rw-r--r--  drivers/spi/spi-xcomm.c | 77
-rw-r--r--  drivers/spi/spi-xilinx.c | 9
-rw-r--r--  drivers/spi/spi-xtensa-xtfpga.c | 2
-rw-r--r--  drivers/spi/spi-zynq-qspi.c | 34
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c | 253
-rw-r--r--  drivers/spi/spi.c | 469
-rw-r--r--  drivers/spi/spidev.c | 39
-rw-r--r--  drivers/spmi/Kconfig | 9
-rw-r--r--  drivers/spmi/Makefile | 1
-rw-r--r--  drivers/spmi/hisi-spmi-controller.c | 5
-rw-r--r--  drivers/spmi/spmi-apple-controller.c | 168
-rw-r--r--  drivers/spmi/spmi-mtk-pmif.c | 2
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 965
-rw-r--r--  drivers/spmi/spmi.c | 6
-rw-r--r--  drivers/ssb/driver_gpio.c | 20
-rw-r--r--  drivers/ssb/main.c | 4
-rw-r--r--  drivers/staging/Kconfig | 22
-rw-r--r--  drivers/staging/Makefile | 11
-rw-r--r--  drivers/staging/axis-fifo/axis-fifo.c | 562
-rw-r--r--  drivers/staging/axis-fifo/axis-fifo.txt | 5
-rw-r--r--  drivers/staging/fbtft/Kconfig | 36
-rw-r--r--  drivers/staging/fbtft/fb_ili9320.c | 2
-rw-r--r--  drivers/staging/fbtft/fb_ra8875.c | 7
-rw-r--r--  drivers/staging/fbtft/fb_seps525.c | 7
-rw-r--r--  drivers/staging/fbtft/fb_sh1106.c | 3
-rw-r--r--  drivers/staging/fbtft/fb_ssd1289.c | 3
-rw-r--r--  drivers/staging/fbtft/fb_ssd1306.c | 3
-rw-r--r--  drivers/staging/fbtft/fb_ssd1325.c | 9
-rw-r--r--  drivers/staging/fbtft/fb_ssd1331.c | 2
-rw-r--r--  drivers/staging/fbtft/fb_ssd1351.c | 12
-rw-r--r--  drivers/staging/fbtft/fb_uc1611.c | 3
-rw-r--r--  drivers/staging/fbtft/fbtft-bus.c | 9
-rw-r--r--  drivers/staging/fbtft/fbtft-core.c | 65
-rw-r--r--  drivers/staging/fbtft/fbtft-sysfs.c | 4
-rw-r--r--  drivers/staging/fbtft/fbtft.h | 4
-rw-r--r--  drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev | 31
-rw-r--r--  drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev | 62
-rw-r--r--  drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt | 71
-rw-r--r--  drivers/staging/fieldbus/Documentation/fieldbus_dev.txt | 66
-rw-r--r--  drivers/staging/fieldbus/Kconfig | 19
-rw-r--r--  drivers/staging/fieldbus/Makefile | 7
-rw-r--r--  drivers/staging/fieldbus/TODO | 5
-rw-r--r--  drivers/staging/fieldbus/anybuss/Kconfig | 41
-rw-r--r--  drivers/staging/fieldbus/anybuss/Makefile | 10
-rw-r--r--  drivers/staging/fieldbus/anybuss/anybuss-client.h | 99
-rw-r--r--  drivers/staging/fieldbus/anybuss/anybuss-controller.h | 47
-rw-r--r--  drivers/staging/fieldbus/anybuss/arcx-anybus.c | 379
-rw-r--r--  drivers/staging/fieldbus/anybuss/hms-profinet.c | 224
-rw-r--r--  drivers/staging/fieldbus/anybuss/host.c | 1452
-rw-r--r--  drivers/staging/fieldbus/dev_core.c | 344
-rw-r--r--  drivers/staging/fieldbus/fieldbus_dev.h | 114
-rw-r--r--  drivers/staging/gdm724x/Kconfig | 16
-rw-r--r--  drivers/staging/gdm724x/Makefile | 8
-rw-r--r--  drivers/staging/gdm724x/TODO | 16
-rw-r--r--  drivers/staging/gdm724x/gdm_endian.c | 37
-rw-r--r--  drivers/staging/gdm724x/gdm_endian.h | 30
-rw-r--r--  drivers/staging/gdm724x/gdm_lte.c | 937
-rw-r--r--  drivers/staging/gdm724x/gdm_lte.h | 71
-rw-r--r--  drivers/staging/gdm724x/gdm_mux.c | 668
-rw-r--r--  drivers/staging/gdm724x/gdm_mux.h | 85
-rw-r--r--  drivers/staging/gdm724x/gdm_tty.c | 316
-rw-r--r--  drivers/staging/gdm724x/gdm_tty.h | 60
-rw-r--r--  drivers/staging/gdm724x/gdm_usb.c | 1012
-rw-r--r--  drivers/staging/gdm724x/gdm_usb.h | 99
-rw-r--r--  drivers/staging/gdm724x/hci.h | 45
-rw-r--r--  drivers/staging/gdm724x/hci_packet.h | 82
-rw-r--r--  drivers/staging/gdm724x/netlink_k.c | 128
-rw-r--r--  drivers/staging/gdm724x/netlink_k.h | 16
-rw-r--r--  drivers/staging/greybus/Documentation/firmware/firmware.c | 28
-rw-r--r--  drivers/staging/greybus/arche-apb-ctrl.c | 3
-rw-r--r--  drivers/staging/greybus/arche-platform.c | 11
-rw-r--r--  drivers/staging/greybus/audio_codec.c | 16
-rw-r--r--  drivers/staging/greybus/audio_helper.c | 9
-rw-r--r--  drivers/staging/greybus/audio_manager.c | 1
-rw-r--r--  drivers/staging/greybus/audio_manager_module.c | 2
-rw-r--r--  drivers/staging/greybus/audio_topology.c | 30
-rw-r--r--  drivers/staging/greybus/bootrom.c | 1
-rw-r--r--  drivers/staging/greybus/camera.c | 78
-rw-r--r--  drivers/staging/greybus/fw-management.c | 54
-rw-r--r--  drivers/staging/greybus/gb-camera.h | 4
-rw-r--r--  drivers/staging/greybus/gbphy.c | 9
-rw-r--r--  drivers/staging/greybus/gpio.c | 52
-rw-r--r--  drivers/staging/greybus/hid.c | 1
-rw-r--r--  drivers/staging/greybus/i2c.c | 1
-rw-r--r--  drivers/staging/greybus/light.c | 9
-rw-r--r--  drivers/staging/greybus/log.c | 1
-rw-r--r--  drivers/staging/greybus/loopback.c | 2
-rw-r--r--  drivers/staging/greybus/power_supply.c | 15
-rw-r--r--  drivers/staging/greybus/pwm.c | 1
-rw-r--r--  drivers/staging/greybus/raw.c | 1
-rw-r--r--  drivers/staging/greybus/sdio.c | 1
-rw-r--r--  drivers/staging/greybus/spi.c | 1
-rw-r--r--  drivers/staging/greybus/spilib.c | 7
-rw-r--r--  drivers/staging/greybus/uart.c | 22
-rw-r--r--  drivers/staging/greybus/usb.c | 1
-rw-r--r--  drivers/staging/greybus/vibrator.c | 1
-rw-r--r--  drivers/staging/iio/TODO | 5
-rw-r--r--  drivers/staging/iio/accel/Kconfig | 12
-rw-r--r--  drivers/staging/iio/accel/Makefile | 1
-rw-r--r--  drivers/staging/iio/accel/adis16203.c | 4
-rw-r--r--  drivers/staging/iio/accel/adis16240.c | 443
-rw-r--r--  drivers/staging/iio/adc/ad7816.c | 6
-rw-r--r--  drivers/staging/iio/addac/adt7316-i2c.c | 14
-rw-r--r--  drivers/staging/iio/addac/adt7316.c | 113
-rw-r--r--  drivers/staging/iio/frequency/ad9832.c | 150
-rw-r--r--  drivers/staging/iio/frequency/ad9832.h | 1
-rw-r--r--  drivers/staging/iio/frequency/ad9834.c | 33
-rw-r--r--  drivers/staging/iio/frequency/ad9834.h | 10
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 59
-rw-r--r--  drivers/staging/ks7010/Kconfig | 14
-rw-r--r--  drivers/staging/ks7010/Makefile | 4
-rw-r--r--  drivers/staging/ks7010/TODO | 36
-rw-r--r--  drivers/staging/ks7010/eap_packet.h | 70
-rw-r--r--  drivers/staging/ks7010/ks7010_sdio.c | 1143
-rw-r--r--  drivers/staging/ks7010/ks_hostif.c | 2312
-rw-r--r--  drivers/staging/ks7010/ks_hostif.h | 617
-rw-r--r--  drivers/staging/ks7010/ks_wlan.h | 567
-rw-r--r--  drivers/staging/ks7010/ks_wlan_ioctl.h | 61
-rw-r--r--  drivers/staging/ks7010/ks_wlan_net.c | 2676
-rw-r--r--  drivers/staging/media/Kconfig | 6
-rw-r--r--  drivers/staging/media/Makefile | 3
-rw-r--r--  drivers/staging/media/atomisp/Kconfig | 1
-rw-r--r--  drivers/staging/media/atomisp/Makefile | 1
-rw-r--r--  drivers/staging/media/atomisp/TODO | 31
-rw-r--r--  drivers/staging/media/atomisp/i2c/Kconfig | 30
-rw-r--r--  drivers/staging/media/atomisp/i2c/Makefile | 4
-rw-r--r--  drivers/staging/media/atomisp/i2c/atomisp-gc0310.c | 722
-rw-r--r--  drivers/staging/media/atomisp/i2c/atomisp-gc2235.c | 16
-rw-r--r--  drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c | 210
-rw-r--r--  drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c | 1617
-rw-r--r--  drivers/staging/media/atomisp/i2c/atomisp-ov2722.c | 16
-rw-r--r--  drivers/staging/media/atomisp/i2c/gc2235.h | 29
-rw-r--r--  drivers/staging/media/atomisp/i2c/mt9m114.h | 1779
-rw-r--r--  drivers/staging/media/atomisp/i2c/ov2722.h | 27
-rw-r--r--  drivers/staging/media/atomisp/include/hmm/hmm.h | 11
-rw-r--r--  drivers/staging/media/atomisp/include/hmm/hmm_bo.h | 15
-rw-r--r--  drivers/staging/media/atomisp/include/hmm/hmm_common.h | 11
-rw-r--r--  drivers/staging/media/atomisp/include/linux/atomisp.h | 16
-rw-r--r--  drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h | 9
-rw-r--r--  drivers/staging/media/atomisp/include/linux/atomisp_platform.h | 44
-rw-r--r--  drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h | 28
-rw-r--r--  drivers/staging/media/atomisp/include/mmu/isp_mmu.h | 11
-rw-r--r--  drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp-regs.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_cmd.c | 84
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_cmd.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_common.h | 12
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_compat.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_compat_css20.c | 15
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_compat_css20.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h | 255
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_csi2.c | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_csi2.h | 28
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c | 249
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_drvfs.c | 166
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_drvfs.h | 26
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_fops.c | 16
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_fops.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c | 101
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_internal.h | 21
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_ioctl.c | 216
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_ioctl.h | 15
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_subdev.c | 22
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_subdev.h | 14
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_tables.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_trace_event.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_v4l2.c | 121
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_v4l2.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h | 15
-rw-r--r--  drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/bits.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h | 20
-rw-r--r--  drivers/staging/media/atomisp/pci/camera/util/src/util.c | 34
-rw-r--r--  drivers/staging/media/atomisp/pci/cell_params.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/css_trace.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/dma_v2_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/gdc_v2_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/gp_timer_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/gpio_block_defs.h | 26
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h | 10
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h | 32
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_local.h | 21
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h | 18
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h | 38
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/gpio.h | 46
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gpio_public.h | 46
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h | 135
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h | 14
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hive_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/hmm/hmm.c | 107
-rw-r--r--  drivers/staging/media/atomisp/pci/hmm/hmm_bo.c | 24
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_3a.h | 14
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_acc_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_buffer.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_control.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_device_access.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_device_access.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_dvs.h | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_env.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_err.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_event_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_firmware.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_frac.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_frame_format.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_frame_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_host_data.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_input_port.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_irq.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_isp_configs.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_isp_configs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_isp_params.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_isp_params.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_isp_states.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_isp_states.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_metadata.h | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_mipi.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_mmu.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_mmu_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_morph.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_pipe.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_pipe_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_prbs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_properties.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_shading.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_stream.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_stream_format.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_stream_public.h | 17
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_timer.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_types.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_version.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/ia_css_version_data.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/if_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/input_selector_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/input_switch_2400_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/input_system_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/irq_controller_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/irq_types_hrt.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c | 12
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c | 12
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h | 15
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c | 33
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h | 35
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c | 18
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c | 18
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c | 17
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c | 12
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c | 18
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c | 14
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c | 27
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h | 14
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h | 165
-rw-r--r--  drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2400_input_system_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2400_input_system_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2400_input_system_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2400_input_system_public.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2400_support.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2401_input_system_global.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2401_input_system_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp2401_input_system_private.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp_acquisition_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/isp_capture_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/mamoiada_params.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/mmu/isp_mmu.c | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/mmu_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c | 272
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c | 17
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/event/src/event.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c | 40
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c | 20
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h | 8
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c | 19
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h | 10
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h | 13
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css.c | 131
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_defs.h | 21
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_dvs_info.h | 37
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_firmware.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_firmware.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_frac.h | 41
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_host_data.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_hrt.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_hrt.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_internal.h | 55
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_legacy.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_metrics.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_metrics.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_mipi.c | 22
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_mipi.h | 11
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_mmu.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_param_dvs.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_param_dvs.h | 32
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_param_shading.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_param_shading.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_params.c | 23
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_params.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_params_internal.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_properties.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_sp.c | 33
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_sp.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_stream_format.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_stream_format.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_struct.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_uds.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/sh_css_version.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/str2mem_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/system_local.c | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/system_local.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/timed_controller_defs.h | 9
-rw-r--r--  drivers/staging/media/atomisp/pci/version.h | 9
-rw-r--r--  drivers/staging/media/av7110/Kconfig | 22
-rw-r--r--  drivers/staging/media/av7110/Makefile | 2
-rw-r--r--  drivers/staging/media/av7110/TODO | 3
-rw-r--r--  drivers/staging/media/av7110/audio-bilingual-channel-select.rst | 58
-rw-r--r--  drivers/staging/media/av7110/audio-channel-select.rst | 57
-rw-r--r--  drivers/staging/media/av7110/audio-clear-buffer.rst | 48
-rw-r--r--  drivers/staging/media/av7110/audio-continue.rst | 48
-rw-r--r--  drivers/staging/media/av7110/audio-fclose.rst | 51
-rw-r--r--  drivers/staging/media/av7110/audio-fopen.rst | 103
-rw-r--r--  drivers/staging/media/av7110/audio-fwrite.rst | 79
-rw-r--r--  drivers/staging/media/av7110/audio-get-capabilities.rst | 54
-rw-r--r--  drivers/staging/media/av7110/audio-get-status.rst | 54
-rw-r--r--  drivers/staging/media/av7110/audio-pause.rst | 49
-rw-r--r--  drivers/staging/media/av7110/audio-play.rst | 48
-rw-r--r--  drivers/staging/media/av7110/audio-select-source.rst | 56
-rw-r--r--  drivers/staging/media/av7110/audio-set-av-sync.rst | 58
-rw-r--r--  drivers/staging/media/av7110/audio-set-bypass-mode.rst | 62
-rw-r--r--  drivers/staging/media/av7110/audio-set-id.rst | 59
-rw-r--r--  drivers/staging/media/av7110/audio-set-mixer.rst | 53
-rw-r--r--  drivers/staging/media/av7110/audio-set-mute.rst | 62
-rw-r--r--  drivers/staging/media/av7110/audio-set-streamtype.rst | 66
-rw-r--r--  drivers/staging/media/av7110/audio-stop.rst | 48
-rw-r--r--  drivers/staging/media/av7110/audio.rst | 27
-rw-r--r--  drivers/staging/media/av7110/audio_data_types.rst | 116
-rw-r--r--  drivers/staging/media/av7110/audio_function_calls.rst | 30
-rw-r--r--  drivers/staging/media/av7110/av7110.c | 498
-rw-r--r--  drivers/staging/media/av7110/av7110.h | 54
-rw-r--r--  drivers/staging/media/av7110/av7110_av.c | 234
-rw-r--r--  drivers/staging/media/av7110/av7110_av.h | 37
-rw-r--r--  drivers/staging/media/av7110/av7110_ca.c | 68
-rw-r--r--  drivers/staging/media/av7110/av7110_ca.h | 12
-rw-r--r--  drivers/staging/media/av7110/av7110_hw.c | 230
-rw-r--r--  drivers/staging/media/av7110/av7110_hw.h | 79
-rw-r--r--  drivers/staging/media/av7110/av7110_ipack.c | 282
-rw-r--r--  drivers/staging/media/av7110/av7110_ipack.h | 12
-rw-r--r--  drivers/staging/media/av7110/av7110_ir.c | 3
-rw-r--r--  drivers/staging/media/av7110/av7110_v4l.c | 106
-rw-r--r--  drivers/staging/media/av7110/budget-patch.c | 665
-rw-r--r--  drivers/staging/media/av7110/dvb_filter.c | 116
-rw-r--r--  drivers/staging/media/av7110/dvb_filter.h | 22
-rw-r--r--  drivers/staging/media/av7110/sp8870.c | 200
-rw-r--r--  drivers/staging/media/av7110/sp8870.h | 24
-rw-r--r--  drivers/staging/media/av7110/video-clear-buffer.rst | 54
-rw-r--r--  drivers/staging/media/av7110/video-command.rst | 96
-rw-r--r--  drivers/staging/media/av7110/video-continue.rst | 57
-rw-r--r--  drivers/staging/media/av7110/video-fast-forward.rst | 72
-rw-r--r--  drivers/staging/media/av7110/video-fclose.rst | 51
-rw-r--r--  drivers/staging/media/av7110/video-fopen.rst | 111
-rw-r--r--  drivers/staging/media/av7110/video-freeze.rst | 61
-rw-r--r--  drivers/staging/media/av7110/video-fwrite.rst | 79
-rw-r--r--  drivers/staging/media/av7110/video-get-capabilities.rst | 61
-rw-r--r--  drivers/staging/media/av7110/video-get-event.rst | 105
-rw-r--r--  drivers/staging/media/av7110/video-get-frame-count.rst | 65
-rw-r--r--  drivers/staging/media/av7110/video-get-pts.rst | 69
-rw-r--r--  drivers/staging/media/av7110/video-get-size.rst | 69
-rw-r--r--  drivers/staging/media/av7110/video-get-status.rst | 72
-rw-r--r--  drivers/staging/media/av7110/video-play.rst | 57
-rw-r--r--  drivers/staging/media/av7110/video-select-source.rst | 76
-rw-r--r--  drivers/staging/media/av7110/video-set-blank.rst | 64
-rw-r--r--  drivers/staging/media/av7110/video-set-display-format.rst | 60
-rw-r--r--  drivers/staging/media/av7110/video-set-format.rst | 82
-rw-r--r--  drivers/staging/media/av7110/video-set-streamtype.rst | 61
-rw-r--r--  drivers/staging/media/av7110/video-slowmotion.rst | 72
-rw-r--r--  drivers/staging/media/av7110/video-stillpicture.rst | 61
-rw-r--r--  drivers/staging/media/av7110/video-stop.rst | 74
-rw-r--r--  drivers/staging/media/av7110/video-try-command.rst | 66
-rw-r--r--  drivers/staging/media/av7110/video.rst | 36
-rw-r--r--  drivers/staging/media/av7110/video_function_calls.rst | 35
-rw-r--r--  drivers/staging/media/av7110/video_types.rst | 248
-rw-r--r--  drivers/staging/media/deprecated/atmel/atmel-isc-base.c | 2
-rw-r--r--  drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c | 12
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c4
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c32
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c14
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c2
-rw-r--r--drivers/staging/media/imx/imx-media-of.c8
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c54
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c2
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h3
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c6
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c47
-rw-r--r--drivers/staging/media/ipu3/ipu3.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3.h1
-rw-r--r--drivers/staging/media/ipu7/Kconfig19
-rw-r--r--drivers/staging/media/ipu7/Makefile23
-rw-r--r--drivers/staging/media/ipu7/TODO28
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h163
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h175
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h412
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h465
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h24
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h49
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.c430
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.h25
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.c158
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.h69
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress-regs.h461
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.c1192
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.h77
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.c276
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.c477
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.h46
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.c301
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.h39
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.c1034
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h1197
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c543
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.h64
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c828
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.h72
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c333
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h52
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c1078
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.h117
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.c1166
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.h140
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.c853
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.h414
-rw-r--r--drivers/staging/media/ipu7/ipu7-platform-regs.h82
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.c78
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.h35
-rw-r--r--drivers/staging/media/ipu7/ipu7.c2778
-rw-r--r--drivers/staging/media/ipu7/ipu7.h242
-rw-r--r--drivers/staging/media/max96712/max96712.c97
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c35
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h5
-rw-r--r--drivers/staging/media/meson/vdec/vdec_1.c16
-rw-r--r--drivers/staging/media/meson/vdec/vdec_hevc.c43
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c44
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.h2
-rw-r--r--drivers/staging/media/omap4iss/Kconfig12
-rw-r--r--drivers/staging/media/omap4iss/Makefile9
-rw-r--r--drivers/staging/media/omap4iss/TODO3
-rw-r--r--drivers/staging/media/omap4iss/iss.c1354
-rw-r--r--drivers/staging/media/omap4iss/iss.h247
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c1379
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.h155
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.c277
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.h47
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c579
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.h63
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c844
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.h89
-rw-r--r--drivers/staging/media/omap4iss/iss_regs.h899
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c884
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.h72
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c1274
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h203
-rw-r--r--drivers/staging/media/rkvdec/Makefile3
-rw-r--r--drivers/staging/media/rkvdec/TODO11
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c1117
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h127
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c4
-rw-r--r--drivers/staging/media/starfive/camss/stf-capture.c4
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c2
-rw-r--r--drivers/staging/media/starfive/camss/stf-video.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c10
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c19
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c25
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c2
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c18
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c8
-rw-r--r--drivers/staging/media/tegra-video/csi.c2
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c6
-rw-r--r--drivers/staging/media/tegra-video/vi.c4
-rw-r--r--drivers/staging/media/tegra-video/vip.c2
-rw-r--r--drivers/staging/most/Kconfig2
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/TODO7
-rw-r--r--drivers/staging/most/dim2/dim2.c2
-rw-r--r--drivers/staging/most/i2c/Kconfig13
-rw-r--r--drivers/staging/most/i2c/Makefile4
-rw-r--r--drivers/staging/most/i2c/i2c.c374
-rw-r--r--drivers/staging/most/video/video.c25
-rw-r--r--drivers/staging/nvec/TODO1
-rw-r--r--drivers/staging/nvec/nvec.c69
-rw-r--r--drivers/staging/nvec/nvec_kbd.c11
-rw-r--r--drivers/staging/nvec/nvec_power.c4
-rw-r--r--drivers/staging/nvec/nvec_ps2.c45
-rw-r--r--drivers/staging/octeon/ethernet-tx.c49
-rw-r--r--drivers/staging/octeon/ethernet.c4
-rw-r--r--drivers/staging/octeon/octeon-stubs.h134
-rw-r--r--drivers/staging/olpc_dcon/Kconfig17
-rw-r--r--drivers/staging/olpc_dcon/Makefile5
-rw-r--r--drivers/staging/olpc_dcon/TODO15
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c807
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h112
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c201
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c204
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dtso48
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433.txt62
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt274
-rw-r--r--drivers/staging/pi433/Kconfig17
-rw-r--r--drivers/staging/pi433/Makefile4
-rw-r--r--drivers/staging/pi433/TODO8
-rw-r--r--drivers/staging/pi433/pi433_if.c1438
-rw-r--r--drivers/staging/pi433/pi433_if.h148
-rw-r--r--drivers/staging/pi433/rf69.c832
-rw-r--r--drivers/staging/pi433/rf69.h66
-rw-r--r--drivers/staging/pi433/rf69_enum.h126
-rw-r--r--drivers/staging/pi433/rf69_registers.h478
-rw-r--r--drivers/staging/rtl8192e/Kconfig61
-rw-r--r--drivers/staging/rtl8192e/Makefile19
-rw-r--r--drivers/staging/rtl8192e/TODO18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h274
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c198
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c82
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c1921
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h34
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c189
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h52
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h244
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c1109
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h57
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h773
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c123
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2016
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h402
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c1869
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h159
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c84
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c37
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c79
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c89
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c231
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h31
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c866
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h13
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/table.c543
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/table.h27
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h60
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c544
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h223
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c699
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h43
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h50
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c450
-rw-r--r--drivers/staging/rtl8192e/rtllib.h1810
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c410
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c711
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c241
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c178
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2568
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2309
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c534
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c906
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c752
-rw-r--r--drivers/staging/rtl8712/Kconfig21
-rw-r--r--drivers/staging/rtl8712/Makefile35
-rw-r--r--drivers/staging/rtl8712/TODO13
-rw-r--r--drivers/staging/rtl8712/basic_types.h28
-rw-r--r--drivers/staging/rtl8712/drv_types.h175
-rw-r--r--drivers/staging/rtl8712/ethernet.h21
-rw-r--r--drivers/staging/rtl8712/hal_init.c401
-rw-r--r--drivers/staging/rtl8712/ieee80211.c415
-rw-r--r--drivers/staging/rtl8712/ieee80211.h165
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c160
-rw-r--r--drivers/staging/rtl8712/mlme_osdep.h31
-rw-r--r--drivers/staging/rtl8712/mp_custom_oid.h287
-rw-r--r--drivers/staging/rtl8712/os_intfs.c481
-rw-r--r--drivers/staging/rtl8712/osdep_intf.h32
-rw-r--r--drivers/staging/rtl8712/osdep_service.h60
-rw-r--r--drivers/staging/rtl8712/recv_linux.c139
-rw-r--r--drivers/staging/rtl8712/recv_osdep.h39
-rw-r--r--drivers/staging/rtl8712/rtl8712_bitdef.h26
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c409
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h231
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h95
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h41
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h32
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h65
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h24
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c563
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h44
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h86
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h131
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h61
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_bitdef.h68
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_regdef.h29
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h142
-rw-r--r--drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h44
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c99
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c1830
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h31
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_regdef.h20
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_bitdef.h39
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_regdef.h26
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h36
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h43
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c1079
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.h145
-rw-r--r--drivers/staging/rtl8712/rtl8712_regdef.h32
-rw-r--r--drivers/staging/rtl8712/rtl8712_security_bitdef.h34
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h121
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h163
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_regdef.h42
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h49
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_regdef.h26
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_bitdef.h49
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_regdef.h36
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c732
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.h108
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c749
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h750
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h130
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.c220
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.h88
-rw-r--r--drivers/staging/rtl8712/rtl871x_event.h109
-rw-r--r--drivers/staging/rtl8712/rtl871x_ht.h33
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c147
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.h236
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h94
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2275
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c519
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.h109
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c354
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.h45
-rw-r--r--drivers/staging/rtl8712/rtl871x_led.h118
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c1710
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h205
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c724
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h275
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c883
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h328
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h1034
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c234
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h113
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c671
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h208
-rw-r--r--drivers/staging/rtl8712/rtl871x_rf.h55
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1386
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h218
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c263
-rw-r--r--drivers/staging/rtl8712/rtl871x_wlan_sme.h35
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c1059
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h288
-rw-r--r--drivers/staging/rtl8712/sta_info.h132
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c307
-rw-r--r--drivers/staging/rtl8712/usb_intf.c638
-rw-r--r--drivers/staging/rtl8712/usb_ops.c195
-rw-r--r--drivers/staging/rtl8712/usb_ops.h38
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c515
-rw-r--r--drivers/staging/rtl8712/usb_osintf.h35
-rw-r--r--drivers/staging/rtl8712/wifi.h196
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h223
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c181
-rw-r--r--drivers/staging/rtl8712/xmit_osdep.h52
-rw-r--r--drivers/staging/rtl8723bs/Kconfig2
-rw-r--r--drivers/staging/rtl8723bs/Makefile5
-rw-r--r--drivers/staging/rtl8723bs/TODO4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c434
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c13
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c26
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c68
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c327
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c50
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c59
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c576
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c265
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c45
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c214
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c34
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c358
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c19
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c159
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c33
-rw-r--r--drivers/staging/rtl8723bs/hal/Hal8723BReg.h373
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c68
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c17
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h7
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c23
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c178
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c184
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_pwr_seq.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_sdio.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c189
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c65
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c1110
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c16
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c40
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c91
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c8
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h882
-rw-r--r--drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h11
-rw-r--r--drivers/staging/rtl8723bs/include/basic_types.h46
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h28
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h13
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_h2c.h76
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h800
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h113
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_cfg.h5
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_reg_8723b.h69
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h11
-rw-r--r--drivers/staging/rtl8723bs/include/ioctl_cfg80211.h1
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h19
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h32
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h9
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h72
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h40
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_cmd.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_recv.h5
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h33
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_event.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ht.h22
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h149
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h191
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h54
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h374
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h38
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h48
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_rf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h132
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h12
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_hal.h6
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_osintf.h16
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h46
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h299
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c79
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1300
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c180
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c68
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c229
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c24
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c17
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c4
-rw-r--r--drivers/staging/rts5208/Kconfig9
-rw-r--r--drivers/staging/rts5208/Makefile5
-rw-r--r--drivers/staging/rts5208/TODO7
-rw-r--r--drivers/staging/rts5208/general.c25
-rw-r--r--drivers/staging/rts5208/general.h19
-rw-r--r--drivers/staging/rts5208/ms.c4311
-rw-r--r--drivers/staging/rts5208/ms.h214
-rw-r--r--drivers/staging/rts5208/rtsx.c987
-rw-r--r--drivers/staging/rts5208/rtsx.h164
-rw-r--r--drivers/staging/rts5208/rtsx_card.c1151
-rw-r--r--drivers/staging/rts5208/rtsx_card.h1087
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c2161
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h987
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c3279
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h131
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h36
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c768
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h57
-rw-r--r--drivers/staging/rts5208/sd.c4717
-rw-r--r--drivers/staging/rts5208/sd.h289
-rw-r--r--drivers/staging/rts5208/spi.c906
-rw-r--r--drivers/staging/rts5208/spi.h52
-rw-r--r--drivers/staging/rts5208/xd.c2145
-rw-r--r--drivers/staging/rts5208/xd.h176
-rw-r--r--drivers/staging/sm750fb/Makefile3
-rw-r--r--drivers/staging/sm750fb/TODO8
-rw-r--r--drivers/staging/sm750fb/ddk750.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c62
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h57
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c247
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.h12
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h8
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c408
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h174
-rw-r--r--drivers/staging/sm750fb/sm750.c71
-rw-r--r--drivers/staging/sm750fb/sm750.h40
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c18
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h12
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c42
-rw-r--r--drivers/staging/vc04_services/Kconfig43
-rw-r--r--drivers/staging/vc04_services/Makefile15
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c11
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig13
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile6
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/TODO17
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c2012
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h142
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c1401
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h109
-rw-r--r--drivers/staging/vc04_services/interface/TESTING82
-rw-r--r--drivers/staging/vc04_services/interface/TODO63
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c1817
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h137
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h57
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h41
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c74
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h569
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h21
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h21
-rw-r--r--drivers/staging/vme_user/vme.c18
-rw-r--r--drivers/staging/vme_user/vme.h17
-rw-r--r--drivers/staging/vme_user/vme_bridge.h56
-rw-r--r--drivers/staging/vme_user/vme_fake.c10
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c158
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h2
-rw-r--r--drivers/staging/vme_user/vme_user.c1
-rw-r--r--drivers/staging/vt6655/Kconfig6
-rw-r--r--drivers/staging/vt6655/Makefile15
-rw-r--r--drivers/staging/vt6655/TODO21
-rw-r--r--drivers/staging/vt6655/baseband.c2257
-rw-r--r--drivers/staging/vt6655/baseband.h72
-rw-r--r--drivers/staging/vt6655/card.c836
-rw-r--r--drivers/staging/vt6655/card.h62
-rw-r--r--drivers/staging/vt6655/channel.c135
-rw-r--r--drivers/staging/vt6655/channel.h17
-rw-r--r--drivers/staging/vt6655/desc.h249
-rw-r--r--drivers/staging/vt6655/device.h292
-rw-r--r--drivers/staging/vt6655/device_cfg.h44
-rw-r--r--drivers/staging/vt6655/device_main.c1868
-rw-r--r--drivers/staging/vt6655/dpc.c145
-rw-r--r--drivers/staging/vt6655/dpc.h21
-rw-r--r--drivers/staging/vt6655/key.c143
-rw-r--r--drivers/staging/vt6655/key.h51
-rw-r--r--drivers/staging/vt6655/mac.c851
-rw-r--r--drivers/staging/vt6655/mac.h580
-rw-r--r--drivers/staging/vt6655/power.c144
-rw-r--r--drivers/staging/vt6655/power.h29
-rw-r--r--drivers/staging/vt6655/rf.c535
-rw-r--r--drivers/staging/vt6655/rf.h75
-rw-r--r--drivers/staging/vt6655/rxtx.c1462
-rw-r--r--drivers/staging/vt6655/rxtx.h184
-rw-r--r--drivers/staging/vt6655/srom.c140
-rw-r--r--drivers/staging/vt6655/srom.h85
-rw-r--r--drivers/staging/vt6655/test9
-rw-r--r--drivers/staging/vt6656/Kconfig7
-rw-r--r--drivers/staging/vt6656/Makefile15
-rw-r--r--drivers/staging/vt6656/TODO19
-rw-r--r--drivers/staging/vt6656/baseband.c455
-rw-r--r--drivers/staging/vt6656/baseband.h75
-rw-r--r--drivers/staging/vt6656/card.c456
-rw-r--r--drivers/staging/vt6656/card.h44
-rw-r--r--drivers/staging/vt6656/channel.c77
-rw-r--r--drivers/staging/vt6656/channel.h21
-rw-r--r--drivers/staging/vt6656/desc.h91
-rw-r--r--drivers/staging/vt6656/device.h386
-rw-r--r--drivers/staging/vt6656/key.c142
-rw-r--r--drivers/staging/vt6656/key.h40
-rw-r--r--drivers/staging/vt6656/mac.c183
-rw-r--r--drivers/staging/vt6656/mac.h373
-rw-r--r--drivers/staging/vt6656/main_usb.c1121
-rw-r--r--drivers/staging/vt6656/power.c112
-rw-r--r--drivers/staging/vt6656/power.h23
-rw-r--r--drivers/staging/vt6656/rf.c443
-rw-r--r--drivers/staging/vt6656/rf.h46
-rw-r--r--drivers/staging/vt6656/rxtx.c730
-rw-r--r--drivers/staging/vt6656/rxtx.h178
-rw-r--r--drivers/staging/vt6656/usbpipe.c506
-rw-r--r--drivers/staging/vt6656/usbpipe.h67
-rw-r--r--drivers/staging/vt6656/wcmd.c185
-rw-r--r--drivers/staging/vt6656/wcmd.h48
-rw-r--r--drivers/staging/wlan-ng/Kconfig13
-rw-r--r--drivers/staging/wlan-ng/Makefile8
-rw-r--r--drivers/staging/wlan-ng/README8
-rw-r--r--drivers/staging/wlan-ng/TODO16
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c718
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h1236
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c3880
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c643
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h141
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h189
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h69
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h227
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h236
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h199
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h39
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c988
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h212
-rw-r--r--drivers/staging/wlan-ng/p80211req.c223
-rw-r--r--drivers/staging/wlan-ng/p80211req.h33
-rw-r--r--drivers/staging/wlan-ng/p80211types.h292
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c207
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c1213
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c1315
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h89
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c742
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c1945
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c299
-rw-r--r--drivers/target/iscsi/Kconfig4
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c174
-rw-r--r--drivers/target/iscsi/iscsi_target.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c52
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c69
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h7
-rw-r--r--drivers/target/loopback/tcm_loop.c8
-rw-r--r--drivers/target/sbp/sbp_target.c10
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_configfs.c78
-rw-r--r--drivers/target/target_core_device.c123
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_fabric_lib.c65
-rw-r--r--drivers/target/target_core_file.c10
-rw-r--r--drivers/target/target_core_iblock.c85
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h5
-rw-r--r--drivers/target/target_core_pr.c30
-rw-r--r--drivers/target/target_core_pscsi.c12
-rw-r--r--drivers/target/target_core_sbc.c53
-rw-r--r--drivers/target/target_core_spc.c221
-rw-r--r--drivers/target/target_core_stat.c227
-rw-r--r--drivers/target/target_core_tpg.c23
-rw-r--r--drivers/target/target_core_transport.c147
-rw-r--r--drivers/target/target_core_user.c18
-rw-r--r--drivers/target/target_core_xcopy.c4
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c4
-rw-r--r--drivers/target/tcm_fc/tfc_io.c2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c2
-rw-r--r--drivers/tc/tc-driver.c6
-rw-r--r--drivers/tc/tc.c2
-rw-r--r--drivers/tee/Kconfig9
-rw-r--r--drivers/tee/Makefile2
-rw-r--r--drivers/tee/amdtee/core.c16
-rw-r--r--drivers/tee/optee/Kconfig6
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c103
-rw-r--r--drivers/tee/optee/device.c7
-rw-r--r--drivers/tee/optee/ffa_abi.c215
-rw-r--r--drivers/tee/optee/notif.c9
-rw-r--r--drivers/tee/optee/optee_ffa.h29
-rw-r--r--drivers/tee/optee/optee_msg.h84
-rw-r--r--drivers/tee/optee/optee_private.h48
-rw-r--r--drivers/tee/optee/optee_rpc_cmd.h36
-rw-r--r--drivers/tee/optee/optee_smc.h39
-rw-r--r--drivers/tee/optee/protmem.c335
-rw-r--r--drivers/tee/optee/rpc.c187
-rw-r--r--drivers/tee/optee/smc_abi.c165
-rw-r--r--drivers/tee/optee/supp.c35
-rw-r--r--drivers/tee/qcomtee/Kconfig13
-rw-r--r--drivers/tee/qcomtee/Makefile9
-rw-r--r--drivers/tee/qcomtee/async.c182
-rw-r--r--drivers/tee/qcomtee/call.c820
-rw-r--r--drivers/tee/qcomtee/core.c915
-rw-r--r--drivers/tee/qcomtee/mem_obj.c169
-rw-r--r--drivers/tee/qcomtee/primordial_obj.c113
-rw-r--r--drivers/tee/qcomtee/qcomtee.h185
-rw-r--r--drivers/tee/qcomtee/qcomtee_msg.h304
-rw-r--r--drivers/tee/qcomtee/qcomtee_object.h316
-rw-r--r--drivers/tee/qcomtee/shm.c150
-rw-r--r--drivers/tee/qcomtee/user_obj.c692
-rw-r--r--drivers/tee/tee_core.c374
-rw-r--r--drivers/tee/tee_heap.c500
-rw-r--r--drivers/tee/tee_private.h20
-rw-r--r--drivers/tee/tee_shm.c179
-rw-r--r--drivers/thermal/Kconfig69
-rw-r--r--drivers/thermal/Makefile10
-rw-r--r--drivers/thermal/airoha_thermal.c489
-rw-r--r--drivers/thermal/amlogic_thermal.c18
-rw-r--r--drivers/thermal/armada_thermal.c4
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c60
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c26
-rw-r--r--drivers/thermal/broadcom/ns-thermal.c2
-rw-r--r--drivers/thermal/cpufreq_cooling.c2
-rw-r--r--drivers/thermal/da9062-thermal.c8
-rw-r--r--drivers/thermal/dove_thermal.c4
-rw-r--r--drivers/thermal/gov_bang_bang.c115
-rw-r--r--drivers/thermal/gov_fair_share.c20
-rw-r--r--drivers/thermal/gov_power_allocator.c120
-rw-r--r--drivers/thermal/gov_step_wise.c53
-rw-r--r--drivers/thermal/gov_user_space.c12
-rw-r--r--drivers/thermal/hisi_thermal.c42
-rw-r--r--drivers/thermal/imx8mm_thermal.c2
-rw-r--r--drivers/thermal/imx91_thermal.c384
-rw-r--r--drivers/thermal/imx_sc_thermal.c3
-rw-r--r--drivers/thermal/imx_thermal.c95
-rw-r--r--drivers/thermal/intel/Kconfig7
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig5
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile2
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c49
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3401_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3402_thermal.c5
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3406_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c29
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c313
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c45
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h14
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c159
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c12
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c72
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c136
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c284
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c10
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c2
-rw-r--r--drivers/thermal/intel/intel_hfi.c56
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c5
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c6
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c30
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c17
-rw-r--r--drivers/thermal/intel/intel_soc_dts_thermal.c2
-rw-r--r--drivers/thermal/intel/intel_tcc.c185
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c39
-rw-r--r--drivers/thermal/intel/therm_throt.c10
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c13
-rw-r--r--drivers/thermal/k3_bandgap.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c123
-rw-r--r--drivers/thermal/kirkwood_thermal.c4
-rw-r--r--drivers/thermal/loongson2_thermal.c15
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c266
-rw-r--r--drivers/thermal/pcie_cooling.c80
-rw-r--r--drivers/thermal/qcom/Kconfig3
-rw-r--r--drivers/thermal/qcom/lmh.c13
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c18
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c609
-rw-r--r--drivers/thermal/qcom/tsens-v1.c81
-rw-r--r--drivers/thermal/qcom/tsens-v2.c178
-rw-r--r--drivers/thermal/qcom/tsens.c44
-rw-r--r--drivers/thermal/qcom/tsens.h9
-rw-r--r--drivers/thermal/qoriq_thermal.c57
-rw-r--r--drivers/thermal/renesas/Kconfig43
-rw-r--r--drivers/thermal/renesas/Makefile7
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c (renamed from drivers/thermal/rcar_gen3_thermal.c)190
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c (renamed from drivers/thermal/rcar_thermal.c)16
-rw-r--r--drivers/thermal/renesas/rzg2l_thermal.c (renamed from drivers/thermal/rzg2l_thermal.c)4
-rw-r--r--drivers/thermal/renesas/rzg3e_thermal.c547
-rw-r--r--drivers/thermal/renesas/rzg3s_thermal.c272
-rw-r--r--drivers/thermal/rockchip_thermal.c304
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c56
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/sprd_thermal.c16
-rw-r--r--drivers/thermal/st/st_thermal.c34
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c14
-rw-r--r--drivers/thermal/st/stm_thermal.c10
-rw-r--r--drivers/thermal/sun8i_thermal.c11
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c18
-rw-r--r--drivers/thermal/tegra/soctherm.c84
-rw-r--r--drivers/thermal/tegra/soctherm.h11
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c2
-rw-r--r--drivers/thermal/tegra/tegra114-soctherm.c209
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c57
-rw-r--r--drivers/thermal/testing/Makefile7
-rw-r--r--drivers/thermal/testing/command.c211
-rw-r--r--drivers/thermal/testing/thermal_testing.h11
-rw-r--r--drivers/thermal/testing/zone.c448
-rw-r--r--drivers/thermal/thermal-generic-adc.c82
-rw-r--r--drivers/thermal/thermal_core.c1203
-rw-r--r--drivers/thermal/thermal_core.h96
-rw-r--r--drivers/thermal/thermal_debugfs.c188
-rw-r--r--drivers/thermal/thermal_debugfs.h2
-rw-r--r--drivers/thermal/thermal_helpers.c69
-rw-r--r--drivers/thermal/thermal_hwmon.c9
-rw-r--r--drivers/thermal/thermal_netlink.c262
-rw-r--r--drivers/thermal/thermal_netlink.h34
-rw-r--r--drivers/thermal/thermal_of.c263
-rw-r--r--drivers/thermal/thermal_sysfs.c326
-rw-r--r--drivers/thermal/thermal_thresholds.c244
-rw-r--r--drivers/thermal/thermal_thresholds.h19
-rw-r--r--drivers/thermal/thermal_trace.h10
-rw-r--r--drivers/thermal/thermal_trip.c104
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h8
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h4
-rw-r--r--drivers/thermal/uniphier_thermal.c41
-rw-r--r--drivers/thunderbolt/Kconfig21
-rw-r--r--drivers/thunderbolt/acpi.c68
-rw-r--r--drivers/thunderbolt/cap.c49
-rw-r--r--drivers/thunderbolt/clx.c12
-rw-r--r--drivers/thunderbolt/ctl.c53
-rw-r--r--drivers/thunderbolt/ctl.h2
-rw-r--r--drivers/thunderbolt/debugfs.c1495
-rw-r--r--drivers/thunderbolt/dma_port.c21
-rw-r--r--drivers/thunderbolt/domain.c87
-rw-r--r--drivers/thunderbolt/eeprom.c86
-rw-r--r--drivers/thunderbolt/icm.c45
-rw-r--r--drivers/thunderbolt/lc.c60
-rw-r--r--drivers/thunderbolt/nhi.c44
-rw-r--r--drivers/thunderbolt/nhi.h5
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/nvm.c44
-rw-r--r--drivers/thunderbolt/path.c18
-rw-r--r--drivers/thunderbolt/property.c38
-rw-r--r--drivers/thunderbolt/retimer.c104
-rw-r--r--drivers/thunderbolt/sb_regs.h62
-rw-r--r--drivers/thunderbolt/switch.c160
-rw-r--r--drivers/thunderbolt/tb.c378
-rw-r--r--drivers/thunderbolt/tb.h165
-rw-r--r--drivers/thunderbolt/tb_msgs.h7
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/test.c90
-rw-r--r--drivers/thunderbolt/tmu.c20
-rw-r--r--drivers/thunderbolt/trace.h13
-rw-r--r--drivers/thunderbolt/tunnel.c636
-rw-r--r--drivers/thunderbolt/tunnel.h87
-rw-r--r--drivers/thunderbolt/usb4.c635
-rw-r--r--drivers/thunderbolt/usb4_port.c63
-rw-r--r--drivers/thunderbolt/xdomain.c61
-rw-r--r--drivers/tty/Kconfig25
-rw-r--r--drivers/tty/amiserial.c25
-rw-r--r--drivers/tty/goldfish.c3
-rw-r--r--drivers/tty/hvc/hvc_console.c8
-rw-r--r--drivers/tty/hvc/hvc_iucv.c22
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvsi_lib.c2
-rw-r--r--drivers/tty/ipwireless/hardware.c6
-rw-r--r--drivers/tty/mips_ejtag_fdc.c11
-rw-r--r--drivers/tty/moxa.c422
-rw-r--r--drivers/tty/mxser.c272
-rw-r--r--drivers/tty/n_gsm.c231
-rw-r--r--drivers/tty/n_hdlc.c80
-rw-r--r--drivers/tty/n_tty.c343
-rw-r--r--drivers/tty/pty.c156
-rw-r--r--drivers/tty/serdev/core.c34
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c9
-rw-r--r--drivers/tty/serial/8250/8250.h55
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c8
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c53
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c122
-rw-r--r--drivers/tty/serial/8250/8250_ce4100.c93
-rw-r--r--drivers/tty/serial/8250/8250_core.c868
-rw-r--r--drivers/tty/serial/8250/8250_dma.c48
-rw-r--r--drivers/tty/serial/8250/8250_dw.c195
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c5
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h33
-rw-r--r--drivers/tty/serial/8250/8250_early.c17
-rw-r--r--drivers/tty/serial/8250/8250_em.c6
-rw-r--r--drivers/tty/serial/8250/8250_exar.c1014
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c16
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c10
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c10
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c8
-rw-r--r--drivers/tty/serial/8250/8250_keba.c280
-rw-r--r--drivers/tty/serial/8250/8250_loongson.c238
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c4
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c2
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c22
-rw-r--r--drivers/tty/serial/8250/8250_ni.c450
-rw-r--r--drivers/tty/serial/8250/8250_of.c57
-rw-r--r--drivers/tty/serial/8250/8250_omap.c360
-rw-r--r--drivers/tty/serial/8250/8250_parisc.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c235
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c130
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c23
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h4
-rw-r--r--drivers/tty/serial/8250/8250_platform.c386
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c55
-rw-r--r--drivers/tty/serial/8250/8250_port.c1165
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c20
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c211
-rw-r--r--drivers/tty/serial/8250/8250_rt288x.c4
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c2
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c6
-rw-r--r--drivers/tty/serial/8250/Kconfig58
-rw-r--r--drivers/tty/serial/8250/Makefile12
-rw-r--r--drivers/tty/serial/8250/serial_cs.c1
-rw-r--r--drivers/tty/serial/Kconfig120
-rw-r--r--drivers/tty/serial/Makefile7
-rw-r--r--drivers/tty/serial/altera_jtaguart.c16
-rw-r--r--drivers/tty/serial/altera_uart.c17
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c359
-rw-r--r--drivers/tty/serial/ar933x_uart.c82
-rw-r--r--drivers/tty/serial/arc_uart.c8
-rw-r--r--drivers/tty/serial/atmel_serial.c194
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c9
-rw-r--r--drivers/tty/serial/clps711x.c14
-rw-r--r--drivers/tty/serial/cpm_uart.c24
-rw-r--r--drivers/tty/serial/digicolor-usart.c14
-rw-r--r--drivers/tty/serial/dz.c13
-rw-r--r--drivers/tty/serial/earlycon.c23
-rw-r--r--drivers/tty/serial/esp32_acm.c3
-rw-r--r--drivers/tty/serial/esp32_uart.c3
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c19
-rw-r--r--drivers/tty/serial/fsl_lpuart.c594
-rw-r--r--drivers/tty/serial/icom.c43
-rw-r--r--drivers/tty/serial/imx.c370
-rw-r--r--drivers/tty/serial/ip22zilog.c378
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c29
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c1
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c38
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c1
-rw-r--r--drivers/tty/serial/kgdb_nmi.c381
-rw-r--r--drivers/tty/serial/kgdboc.c9
-rw-r--r--drivers/tty/serial/lantiq.c6
-rw-r--r--drivers/tty/serial/liteuart.c8
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c2
-rw-r--r--drivers/tty/serial/ma35d1_serial.c17
-rw-r--r--drivers/tty/serial/max3100.c343
-rw-r--r--drivers/tty/serial/max310x.c72
-rw-r--r--drivers/tty/serial/mcf.c4
-rw-r--r--drivers/tty/serial/men_z135_uart.c28
-rw-r--r--drivers/tty/serial/meson_uart.c14
-rw-r--r--drivers/tty/serial/milbeaut_usio.c22
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/msm_serial.c132
-rw-r--r--drivers/tty/serial/mux.c4
-rw-r--r--drivers/tty/serial/mvebu-uart.c18
-rw-r--r--drivers/tty/serial/mxs-auart.c25
-rw-r--r--drivers/tty/serial/omap-serial.c3
-rw-r--r--drivers/tty/serial/owl-uart.c3
-rw-r--r--drivers/tty/serial/pch_uart.c24
-rw-r--r--drivers/tty/serial/pic32_uart.c19
-rw-r--r--drivers/tty/serial/pmac_zilog.c33
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c878
-rw-r--r--drivers/tty/serial/rda-uart.c19
-rw-r--r--drivers/tty/serial/rp2.c14
-rw-r--r--drivers/tty/serial/rsci.c480
-rw-r--r--drivers/tty/serial/rsci.h10
-rw-r--r--drivers/tty/serial/sa1100.c8
-rw-r--r--drivers/tty/serial/samsung_tty.c128
-rw-r--r--drivers/tty/serial/sb1250-duart.c13
-rw-r--r--drivers/tty/serial/sc16is7xx.c983
-rw-r--r--drivers/tty/serial/sc16is7xx.h40
-rw-r--r--drivers/tty/serial/sc16is7xx_i2c.c67
-rw-r--r--drivers/tty/serial/sc16is7xx_spi.c90
-rw-r--r--drivers/tty/serial/sccnxp.c22
-rw-r--r--drivers/tty/serial/serial-tegra.c45
-rw-r--r--drivers/tty/serial/serial_base.h16
-rw-r--r--drivers/tty/serial/serial_base_bus.c43
-rw-r--r--drivers/tty/serial/serial_core.c1059
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c63
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h27
-rw-r--r--drivers/tty/serial/serial_port.c28
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci-common.h175
-rw-r--r--drivers/tty/serial/sh-sci.c1245
-rw-r--r--drivers/tty/serial/sh-sci.h180
-rw-r--r--drivers/tty/serial/sifive.c100
-rw-r--r--drivers/tty/serial/sprd_serial.c69
-rw-r--r--drivers/tty/serial/st-asc.c16
-rw-r--r--drivers/tty/serial/stm32-usart.c62
-rw-r--r--drivers/tty/serial/sunhv.c37
-rw-r--r--drivers/tty/serial/sunplus-uart.c18
-rw-r--r--drivers/tty/serial/sunsab.c32
-rw-r--r--drivers/tty/serial/sunsu.c232
-rw-r--r--drivers/tty/serial/sunzilog.c29
-rw-r--r--drivers/tty/serial/tegra-tcu.c12
-rw-r--r--drivers/tty/serial/tegra-utc.c625
-rw-r--r--drivers/tty/serial/timbuart.c19
-rw-r--r--drivers/tty/serial/uartlite.c40
-rw-r--r--drivers/tty/serial/ucc_uart.c24
-rw-r--r--drivers/tty/serial/xilinx_uartps.c80
-rw-r--r--drivers/tty/serial/zs.c13
-rw-r--r--drivers/tty/synclink_gt.c33
-rw-r--r--drivers/tty/sysrq.c83
-rw-r--r--drivers/tty/tty_audit.c10
-rw-r--r--drivers/tty/tty_buffer.c11
-rw-r--r--drivers/tty/tty_io.c133
-rw-r--r--drivers/tty/tty_ioctl.c50
-rw-r--r--drivers/tty/tty_ldisc.c6
-rw-r--r--drivers/tty/tty_ldsem.c17
-rw-r--r--drivers/tty/tty_port.c195
-rw-r--r--drivers/tty/ttynull.c1
-rw-r--r--drivers/tty/vcc.c10
-rw-r--r--drivers/tty/vt/.gitignore3
-rw-r--r--drivers/tty/vt/Makefile34
-rw-r--r--drivers/tty/vt/conmakehash.c9
-rw-r--r--drivers/tty/vt/consolemap.c118
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped112
-rwxr-xr-xdrivers/tty/vt/gen_ucs_fallback_table.py360
-rwxr-xr-xdrivers/tty/vt/gen_ucs_recompose_table.py257
-rwxr-xr-xdrivers/tty/vt/gen_ucs_width_table.py307
-rw-r--r--drivers/tty/vt/keyboard.c363
-rw-r--r--drivers/tty/vt/selection.c73
-rw-r--r--drivers/tty/vt/ucs.c251
-rw-r--r--drivers/tty/vt/ucs_fallback_table.h_shipped3346
-rw-r--r--drivers/tty/vt/ucs_recompose_table.h_shipped102
-rw-r--r--drivers/tty/vt/ucs_width_table.h_shipped453
-rw-r--r--drivers/tty/vt/vc_screen.c76
-rw-r--r--drivers/tty/vt/vt.c512
-rw-r--r--drivers/tty/vt/vt_ioctl.c212
-rw-r--r--drivers/ufs/core/Makefile1
-rw-r--r--drivers/ufs/core/ufs-fault-injection.c1
-rw-r--r--drivers/ufs/core/ufs-mcq.c229
-rw-r--r--drivers/ufs/core/ufs-rpmb.c254
-rw-r--r--drivers/ufs/core/ufs-sysfs.c574
-rw-r--r--drivers/ufs/core/ufs_bsg.c12
-rw-r--r--drivers/ufs/core/ufs_trace.h408
-rw-r--r--drivers/ufs/core/ufs_trace_types.h23
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c63
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h40
-rw-r--r--drivers/ufs/core/ufshcd-priv.h103
-rw-r--r--drivers/ufs/core/ufshcd.c2809
-rw-r--r--drivers/ufs/host/Kconfig25
-rw-r--r--drivers/ufs/host/Makefile2
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c6
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c8
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pltfrm.c7
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c39
-rw-r--r--drivers/ufs/host/ufs-amd-versal2.c564
-rw-r--r--drivers/ufs/host/ufs-exynos.c490
-rw-r--r--drivers/ufs/host/ufs-exynos.h10
-rw-r--r--drivers/ufs/host/ufs-hisi.c12
-rw-r--r--drivers/ufs/host/ufs-mediatek.c824
-rw-r--r--drivers/ufs/host/ufs-mediatek.h40
-rw-r--r--drivers/ufs/host/ufs-qcom.c962
-rw-r--r--drivers/ufs/host/ufs-qcom.h102
-rw-r--r--drivers/ufs/host/ufs-renesas.c738
-rw-r--r--drivers/ufs/host/ufs-rockchip.c362
-rw-r--r--drivers/ufs/host/ufs-rockchip.h90
-rw-r--r--drivers/ufs/host/ufs-sprd.c13
-rw-r--r--drivers/ufs/host/ufshcd-dwc.h46
-rw-r--r--drivers/ufs/host/ufshcd-pci.c163
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c89
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h2
-rw-r--r--drivers/uio/Kconfig32
-rw-r--r--drivers/uio/Makefile2
-rw-r--r--drivers/uio/uio.c31
-rw-r--r--drivers/uio/uio_aec.c3
-rw-r--r--drivers/uio/uio_cif.c3
-rw-r--r--drivers/uio/uio_dmem_genirq.c30
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c11
-rw-r--r--drivers/uio/uio_hv_generic.c195
-rw-r--r--drivers/uio/uio_mf624.c3
-rw-r--r--drivers/uio/uio_netx.c3
-rw-r--r--drivers/uio/uio_pci_generic_sva.c192
-rw-r--r--drivers/uio/uio_pdrv_genirq.c39
-rw-r--r--drivers/uio/uio_pruss.c255
-rw-r--r--drivers/uio/uio_sercos3.c2
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c129
-rw-r--r--drivers/usb/atm/speedtch.c18
-rw-r--r--drivers/usb/atm/ueagle-atm.c8
-rw-r--r--drivers/usb/atm/usbatm.c8
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c7
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c2
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c10
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c4
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c2
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c124
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h83
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h5
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c18
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c76
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h19
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c42
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c46
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h35
-rw-r--r--drivers/usb/cdns3/core.c9
-rw-r--r--drivers/usb/cdns3/core.h6
-rw-r--r--drivers/usb/cdns3/drd.c10
-rw-r--r--drivers/usb/cdns3/drd.h3
-rw-r--r--drivers/usb/cdns3/host.c15
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c149
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_npcm.c8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c11
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c2
-rw-r--r--drivers/usb/chipidea/core.c10
-rw-r--r--drivers/usb/chipidea/host.c13
-rw-r--r--drivers/usb/chipidea/otg_fsm.c4
-rw-r--r--drivers/usb/chipidea/trace.h4
-rw-r--r--drivers/usb/chipidea/udc.c198
-rw-r--r--drivers/usb/chipidea/udc.h2
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c240
-rw-r--r--drivers/usb/class/cdc-acm.c55
-rw-r--r--drivers/usb/class/cdc-wdm.c50
-rw-r--r--drivers/usb/class/usblp.c45
-rw-r--r--drivers/usb/class/usbtmc.c95
-rw-r--r--drivers/usb/common/common.c40
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/common/usb-conn-gpio.c32
-rw-r--r--drivers/usb/common/usb-otg-fsm.c1
-rw-r--r--drivers/usb/core/Makefile10
-rw-r--r--drivers/usb/core/config.c108
-rw-r--r--drivers/usb/core/devio.c5
-rw-r--r--drivers/usb/core/driver.c109
-rw-r--r--drivers/usb/core/endpoint.c11
-rw-r--r--drivers/usb/core/generic.c14
-rw-r--r--drivers/usb/core/hcd-pci.c20
-rw-r--r--drivers/usb/core/hcd.c81
-rw-r--r--drivers/usb/core/hub.c260
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/core/ledtrig-usbport.c3
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/of.c7
-rw-r--r--drivers/usb/core/offload.c136
-rw-r--r--drivers/usb/core/port.c21
-rw-r--r--drivers/usb/core/quirks.c36
-rw-r--r--drivers/usb/core/sysfs.c15
-rw-r--r--drivers/usb/core/trace.c6
-rw-r--r--drivers/usb/core/trace.h61
-rw-r--r--drivers/usb/core/urb.c47
-rw-r--r--drivers/usb/core/usb-acpi.c60
-rw-r--r--drivers/usb/core/usb.c148
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/Kconfig2
-rw-r--r--drivers/usb/dwc2/core.c43
-rw-r--r--drivers/usb/dwc2/core.h31
-rw-r--r--drivers/usb/dwc2/core_intr.c26
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc2/drd.c9
-rw-r--r--drivers/usb/dwc2/gadget.c175
-rw-r--r--drivers/usb/dwc2/hcd.c132
-rw-r--r--drivers/usb/dwc2/hcd_queue.c61
-rw-r--r--drivers/usb/dwc2/hw.h14
-rw-r--r--drivers/usb/dwc2/params.c100
-rw-r--r--drivers/usb/dwc2/platform.c82
-rw-r--r--drivers/usb/dwc3/Kconfig24
-rw-r--r--drivers/usb/dwc3/Makefile3
-rw-r--r--drivers/usb/dwc3/core.c872
-rw-r--r--drivers/usb/dwc3/core.h92
-rw-r--r--drivers/usb/dwc3/debug.h18
-rw-r--r--drivers/usb/dwc3/debugfs.c12
-rw-r--r--drivers/usb/dwc3/drd.c21
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c97
-rw-r--r--drivers/usb/dwc3/dwc3-apple.c489
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c51
-rw-r--r--drivers/usb/dwc3/dwc3-generic-plat.c233
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c132
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c2
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c5
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c21
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c2
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c21
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c105
-rw-r--r--drivers/usb/dwc3/dwc3-qcom-legacy.c935
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c655
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c54
-rw-r--r--drivers/usb/dwc3/dwc3-st.c58
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c90
-rw-r--r--drivers/usb/dwc3/ep0.c26
-rw-r--r--drivers/usb/dwc3/gadget.c575
-rw-r--r--drivers/usb/dwc3/glue.h193
-rw-r--r--drivers/usb/dwc3/host.c16
-rw-r--r--drivers/usb/dwc3/trace.h25
-rw-r--r--drivers/usb/early/xhci-dbc.c4
-rw-r--r--drivers/usb/fotg210/Makefile10
-rw-r--r--drivers/usb/fotg210/fotg210-core.c8
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c5
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/composite.c93
-rw-r--r--drivers/usb/gadget/config.c53
-rw-r--r--drivers/usb/gadget/configfs.c29
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/function/Makefile4
-rw-r--r--drivers/usb/gadget/function/f_acm.c95
-rw-r--r--drivers/usb/gadget/function/f_ecm.c60
-rw-r--r--drivers/usb/gadget/function/f_eem.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c252
-rw-r--r--drivers/usb/gadget/function/f_hid.c438
-rw-r--r--drivers/usb/gadget/function/f_loopback.c3
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c18
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h2
-rw-r--r--drivers/usb/gadget/function/f_midi.c30
-rw-r--r--drivers/usb/gadget/function/f_midi2.c66
-rw-r--r--drivers/usb/gadget/function/f_ncm.c88
-rw-r--r--drivers/usb/gadget/function/f_obex.c1
-rw-r--r--drivers/usb/gadget/function/f_phonet.c1
-rw-r--r--drivers/usb/gadget/function/f_printer.c49
-rw-r--r--drivers/usb/gadget/function/f_rndis.c86
-rw-r--r--drivers/usb/gadget/function/f_serial.c8
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c3
-rw-r--r--drivers/usb/gadget/function/f_subset.c1
-rw-r--r--drivers/usb/gadget/function/f_tcm.c726
-rw-r--r--drivers/usb/gadget/function/f_uac1.c66
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c93
-rw-r--r--drivers/usb/gadget/function/f_uvc.c5
-rw-r--r--drivers/usb/gadget/function/rndis.c6
-rw-r--r--drivers/usb/gadget/function/storage_common.c1
-rw-r--r--drivers/usb/gadget/function/storage_common.h4
-rw-r--r--drivers/usb/gadget/function/tcm.h28
-rw-r--r--drivers/usb/gadget/function/u_audio.c84
-rw-r--r--drivers/usb/gadget/function/u_ether.c11
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c92
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/u_uac1.h12
-rw-r--r--drivers/usb/gadget/function/u_uac2.h15
-rw-r--r--drivers/usb/gadget/function/uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c372
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h20
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c28
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_trace.c11
-rw-r--r--drivers/usb/gadget/function/uvc_trace.h60
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c108
-rw-r--r--drivers/usb/gadget/function/uvc_video.c271
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c1
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c1
-rw-r--r--drivers/usb/gadget/legacy/hid.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c63
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c8
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/legacy/zero.c32
-rw-r--r--drivers/usb/gadget/u_f.c2
-rw-r--r--drivers/usb/gadget/u_f.h86
-rw-r--r--drivers/usb/gadget/u_os_desc.h2
-rw-r--r--drivers/usb/gadget/udc/Kconfig44
-rw-r--r--drivers/usb/gadget/udc/Makefile5
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c10
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c7
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c3
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c2
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-ep0.c2
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c30
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.h9
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c8
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.h89
-rw-r--r--drivers/usb/gadget/udc/core.c47
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c79
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c8
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c7
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c1516
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.h675
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c71
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c10
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h317
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2062
-rw-r--r--drivers/usb/gadget/udc/mv_udc.h309
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2426
-rw-r--r--drivers/usb/gadget/udc/net2272.c2723
-rw-r--r--drivers/usb/gadget/udc/net2272.h584
-rw-r--r--drivers/usb/gadget/udc/net2280.c10
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c25
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c19
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c12
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c20
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c8
-rw-r--r--drivers/usb/gadget/udc/rzv2m_usb3drd.c2
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c8
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c2
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c24
-rw-r--r--drivers/usb/gadget/udc/trace.h9
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c10
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/host/Kconfig15
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/bcma-hcd.c1
-rw-r--r--drivers/usb/host/ehci-atmel.c2
-rw-r--r--drivers/usb/host/ehci-brcm.c3
-rw-r--r--drivers/usb/host/ehci-dbg.c10
-rw-r--r--drivers/usb/host/ehci-exynos.c38
-rw-r--r--drivers/usb/host/ehci-fsl.c27
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c7
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c2
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-platform.c47
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-q.c20
-rw-r--r--drivers/usb/host/ehci-sh.c11
-rw-r--r--drivers/usb/host/ehci-spear.c9
-rw-r--r--drivers/usb/host/ehci-st.c2
-rw-r--r--drivers/usb/host/ehci-sysfs.c18
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/ehci.h8
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fhci-sched.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c5
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/max3421-hcd.c25
-rw-r--r--drivers/usb/host/octeon-hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c4
-rw-r--r--drivers/usb/host/ohci-da8xx.c21
-rw-r--r--drivers/usb/host/ohci-exynos.c39
-rw-r--r--drivers/usb/host/ohci-hcd.c7
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/ohci-nxp.c20
-rw-r--r--drivers/usb/host/ohci-omap.c4
-rw-r--r--drivers/usb/host/ohci-pci.c23
-rw-r--r--drivers/usb/host/ohci-platform.c26
-rw-r--r--drivers/usb/host/ohci-ppc-of.c6
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-s3c2410.c10
-rw-r--r--drivers/usb/host/ohci-sm501.c2
-rw-r--r--drivers/usb/host/ohci-spear.c5
-rw-r--r--drivers/usb/host/ohci-st.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c46
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/r8a66597-hcd.c21
-rw-r--r--drivers/usb/host/sl811-hcd.c12
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.h1
-rw-r--r--drivers/usb/host/uhci-platform.c32
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/usb/host/xen-hcd.c10
-rw-r--r--drivers/usb/host/xhci-caps.h161
-rw-r--r--drivers/usb/host/xhci-dbgcap.c320
-rw-r--r--drivers/usb/host/xhci-dbgcap.h10
-rw-r--r--drivers/usb/host/xhci-dbgtty.c197
-rw-r--r--drivers/usb/host/xhci-debugfs.c192
-rw-r--r--drivers/usb/host/xhci-ext-caps.h5
-rw-r--r--drivers/usb/host/xhci-histb.c4
-rw-r--r--drivers/usb/host/xhci-hub.c212
-rw-r--r--drivers/usb/host/xhci-mem.c685
-rw-r--r--drivers/usb/host/xhci-mtk.c7
-rw-r--r--drivers/usb/host/xhci-mtk.h10
-rw-r--r--drivers/usb/host/xhci-mvebu.c10
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c56
-rw-r--r--drivers/usb/host/xhci-pci.c277
-rw-r--r--drivers/usb/host/xhci-pci.h19
-rw-r--r--drivers/usb/host/xhci-plat.c94
-rw-r--r--drivers/usb/host/xhci-plat.h3
-rw-r--r--drivers/usb/host/xhci-port.h5
-rw-r--r--drivers/usb/host/xhci-rcar-regs.h49
-rw-r--r--drivers/usb/host/xhci-rcar.c108
-rw-r--r--drivers/usb/host/xhci-ring.c1457
-rw-r--r--drivers/usb/host/xhci-rzg3e-regs.h12
-rw-r--r--drivers/usb/host/xhci-sideband.c507
-rw-r--r--drivers/usb/host/xhci-tegra.c119
-rw-r--r--drivers/usb/host/xhci-trace.h143
-rw-r--r--drivers/usb/host/xhci.c621
-rw-r--r--drivers/usb/host/xhci.h399
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/isp1760/Kconfig2
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c4
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c6
-rw-r--r--drivers/usb/misc/Kconfig48
-rw-r--r--drivers/usb/misc/Makefile3
-rw-r--r--drivers/usb/misc/apple-mfi-fastcharge.c25
-rw-r--r--drivers/usb/misc/appledisplay.c15
-rw-r--r--drivers/usb/misc/brcmstb-usb-pinmap.c1
-rw-r--r--drivers/usb/misc/chaoskey.c51
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c4
-rw-r--r--drivers/usb/misc/ezusb.c1
-rw-r--r--drivers/usb/misc/iowarrior.c50
-rw-r--r--drivers/usb/misc/isight_firmware.c1
-rw-r--r--drivers/usb/misc/ldusb.c1
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c726
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h158
-rw-r--r--drivers/usb/misc/onboard_usb_dev_pdevs.c144
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c507
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h90
-rw-r--r--drivers/usb/misc/onboard_usb_hub_pdevs.c143
-rw-r--r--drivers/usb/misc/qcom_eud.c40
-rw-r--r--drivers/usb/misc/usb-ljca.c68
-rw-r--r--drivers/usb/misc/usb251xb.c136
-rw-r--r--drivers/usb/misc/usb3503.c4
-rw-r--r--drivers/usb/misc/usb4604.c2
-rw-r--r--drivers/usb/misc/usbio.c749
-rw-r--r--drivers/usb/misc/usbtest.c9
-rw-r--r--drivers/usb/misc/uss720.c40
-rw-r--r--drivers/usb/misc/yurex.c27
-rw-r--r--drivers/usb/mon/mon_bin.c17
-rw-r--r--drivers/usb/mon/mon_main.c1
-rw-r--r--drivers/usb/mon/mon_stat.c1
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/mtu3/Kconfig2
-rw-r--r--drivers/usb/mtu3/mtu3.h34
-rw-r--r--drivers/usb/mtu3/mtu3_core.c2
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c43
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c3
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c3
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c5
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c2
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h8
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/da8xx.c42
-rw-r--r--drivers/usb/musb/jz4740.c6
-rw-r--r--drivers/usb/musb/mediatek.c31
-rw-r--r--drivers/usb/musb/mpfs.c167
-rw-r--r--drivers/usb/musb/musb_core.c32
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_dsps.c18
-rw-r--r--drivers/usb/musb/musb_gadget.c31
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c2
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/musb/musb_trace.h12
-rw-r--r--drivers/usb/musb/musb_virthub.c2
-rw-r--r--drivers/usb/musb/omap2430.c40
-rw-r--r--drivers/usb/musb/sunxi.c8
-rw-r--r--drivers/usb/musb/tusb6010.c12
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/phy/Kconfig12
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c2
-rw-r--r--drivers/usb/phy/phy-am335x-control.c1
-rw-r--r--drivers/usb/phy/phy-am335x.c3
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c8
-rw-r--r--drivers/usb/phy/phy-generic.c5
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c3
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c880
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c142
-rw-r--r--drivers/usb/phy/phy-tahvo.c5
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c91
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c5
-rw-r--r--drivers/usb/phy/phy-ulpi.c23
-rw-r--r--drivers/usb/phy/phy.c32
-rw-r--r--drivers/usb/renesas_usbhs/common.c164
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/renesas_usbhs/rza.h1
-rw-r--r--drivers/usb/renesas_usbhs/rza2.c13
-rw-r--r--drivers/usb/roles/class.c12
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c2
-rw-r--r--drivers/usb/serial/aircable.c3
-rw-r--r--drivers/usb/serial/ark3116.c1
-rw-r--r--drivers/usb/serial/belkin_sa.c43
-rw-r--r--drivers/usb/serial/bus.c8
-rw-r--r--drivers/usb/serial/ch341.c39
-rw-r--r--drivers/usb/serial/cp210x.c10
-rw-r--r--drivers/usb/serial/cyberjack.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c5
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/empeg.c1
-rw-r--r--drivers/usb/serial/f81232.c2
-rw-r--r--drivers/usb/serial/f81534.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c234
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h22
-rw-r--r--drivers/usb/serial/garmin_gps.c9
-rw-r--r--drivers/usb/serial/generic.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c14
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/ipw.c1
-rw-r--r--drivers/usb/serial/ir-usb.c1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/keyspan_pda.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c3
-rw-r--r--drivers/usb/serial/kobil_sct.c214
-rw-r--r--drivers/usb/serial/mct_u232.c3
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/usb/serial/mos7720.c1
-rw-r--r--drivers/usb/serial/mos7840.c63
-rw-r--r--drivers/usb/serial/mxuport.c4
-rw-r--r--drivers/usb/serial/navman.c2
-rw-r--r--drivers/usb/serial/omninet.c1
-rw-r--r--drivers/usb/serial/opticon.c1
-rw-r--r--drivers/usb/serial/option.c209
-rw-r--r--drivers/usb/serial/oti6858.c3
-rw-r--r--drivers/usb/serial/pl2303.c44
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/qcaux.c2
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/quatech2.c5
-rw-r--r--drivers/usb/serial/safe_serial.c1
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/spcp8x5.c11
-rw-r--r--drivers/usb/serial/ssu100.c1
-rw-r--r--drivers/usb/serial/symbolserial.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c7
-rw-r--r--drivers/usb/serial/upd78f0730.c1
-rw-r--r--drivers/usb/serial/usb-serial-simple.c9
-rw-r--r--drivers/usb/serial/usb-serial.c23
-rw-r--r--drivers/usb/serial/usb_debug.c10
-rw-r--r--drivers/usb/serial/visor.c3
-rw-r--r--drivers/usb/serial/whiteheat.c2
-rw-r--r--drivers/usb/serial/wishbone-serial.c1
-rw-r--r--drivers/usb/serial/xr_serial.c1
-rw-r--r--drivers/usb/serial/xsens_mt.c1
-rw-r--r--drivers/usb/storage/Kconfig3
-rw-r--r--drivers/usb/storage/Makefile2
-rw-r--r--drivers/usb/storage/alauda.c23
-rw-r--r--drivers/usb/storage/cypress_atacb.c6
-rw-r--r--drivers/usb/storage/datafab.c20
-rw-r--r--drivers/usb/storage/debug.c4
-rw-r--r--drivers/usb/storage/ene_ub6250.c16
-rw-r--r--drivers/usb/storage/freecom.c6
-rw-r--r--drivers/usb/storage/initializers.c2
-rw-r--r--drivers/usb/storage/isd200.c6
-rw-r--r--drivers/usb/storage/jumpshot.c16
-rw-r--r--drivers/usb/storage/karma.c6
-rw-r--r--drivers/usb/storage/onetouch.c6
-rw-r--r--drivers/usb/storage/protocol.c3
-rw-r--r--drivers/usb/storage/realtek_cr.c25
-rw-r--r--drivers/usb/storage/scsiglue.c21
-rw-r--r--drivers/usb/storage/sddr09.c24
-rw-r--r--drivers/usb/storage/sddr55.c24
-rw-r--r--drivers/usb/storage/shuttle_usbat.c12
-rw-r--r--drivers/usb/storage/transport.c28
-rw-r--r--drivers/usb/storage/uas.c58
-rw-r--r--drivers/usb/storage/unusual_devs.h47
-rw-r--r--drivers/usb/storage/unusual_uas.h16
-rw-r--r--drivers/usb/storage/usb.c121
-rw-r--r--drivers/usb/typec/altmodes/Kconfig9
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c46
-rw-r--r--drivers/usb/typec/altmodes/nvidia.c3
-rw-r--r--drivers/usb/typec/altmodes/thunderbolt.c388
-rw-r--r--drivers/usb/typec/anx7411.c88
-rw-r--r--drivers/usb/typec/bus.c10
-rw-r--r--drivers/usb/typec/class.c316
-rw-r--r--drivers/usb/typec/class.h4
-rw-r--r--drivers/usb/typec/hd3ss3220.c282
-rw-r--r--drivers/usb/typec/mux.c4
-rw-r--r--drivers/usb/typec/mux/Kconfig21
-rw-r--r--drivers/usb/typec/mux/Makefile2
-rw-r--r--drivers/usb/typec/mux/fsa4480.c19
-rw-r--r--drivers/usb/typec/mux/gpio-sbu-mux.c16
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c9
-rw-r--r--drivers/usb/typec/mux/nb7vpq904m.c97
-rw-r--r--drivers/usb/typec/mux/ps883x.c499
-rw-r--r--drivers/usb/typec/mux/ptn36502.c77
-rw-r--r--drivers/usb/typec/mux/tusb1046.c196
-rw-r--r--drivers/usb/typec/pd.c95
-rw-r--r--drivers/usb/typec/port-mapper.c23
-rw-r--r--drivers/usb/typec/rt1719.c11
-rw-r--r--drivers/usb/typec/stusb160x.c9
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c36
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c115
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c22
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c13
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c234
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h35
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c129
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c38
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c536
-rw-r--r--drivers/usb/typec/tcpm/wcove.c6
-rw-r--r--drivers/usb/typec/tipd/core.c663
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h18
-rw-r--r--drivers/usb/typec/tipd/trace.h41
-rw-r--r--drivers/usb/typec/ucsi/Kconfig35
-rw-r--r--drivers/usb/typec/ucsi/Makefile3
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c341
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c79
-rw-r--r--drivers/usb/typec/ucsi/displayport.c38
-rw-r--r--drivers/usb/typec/ucsi/psy.c98
-rw-r--r--drivers/usb/typec/ucsi/trace.c19
-rw-r--r--drivers/usb/typec/ucsi/trace.h29
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c970
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h415
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c180
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c197
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c279
-rw-r--r--drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c528
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c81
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c332
-rw-r--r--drivers/usb/usbip/stub_rx.c79
-rw-r--r--drivers/usb/usbip/stub_tx.c11
-rw-r--r--drivers/usb/usbip/vhci_hcd.c178
-rw-r--r--drivers/usb/usbip/vhci_rx.c6
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c3
-rw-r--r--drivers/usb/usbip/vudc_main.c2
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c6
-rw-r--r--drivers/usb/usbip/vudc_transfer.c2
-rw-r--r--drivers/usb/usbip/vudc_tx.c2
-rw-r--r--drivers/vdpa/Kconfig17
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c5
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h3
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c6
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h47
-rw-r--r--drivers/vdpa/mlx5/core/mr.c317
-rw-r--r--drivers/vdpa/mlx5/core/resources.c76
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c819
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.h1
-rw-r--r--drivers/vdpa/octeon_ep/Makefile4
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa.h114
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_hw.c549
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c900
-rw-r--r--drivers/vdpa/pds/cmds.h1
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c7
-rw-r--r--drivers/vdpa/solidrun/snet_main.c71
-rw-r--r--drivers/vdpa/vdpa.c89
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c6
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c21
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c141
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c108
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c51
-rw-r--r--drivers/vfio/cdx/Makefile6
-rw-r--r--drivers/vfio/cdx/main.c31
-rw-r--r--drivers/vfio/cdx/private.h14
-rw-r--r--drivers/vfio/debugfs.c19
-rw-r--r--drivers/vfio/device_cdev.c105
-rw-r--r--drivers/vfio/fsl-mc/Kconfig5
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c45
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c4
-rw-r--r--drivers/vfio/group.c62
-rw-r--r--drivers/vfio/iommufd.c68
-rw-r--r--drivers/vfio/mdev/mdev_core.c4
-rw-r--r--drivers/vfio/mdev/mdev_driver.c2
-rw-r--r--drivers/vfio/mdev/mdev_private.h3
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/Kconfig11
-rw-r--r--drivers/vfio/pci/Makefile3
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c539
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h56
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c416
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h35
-rw-r--r--drivers/vfio/pci/mlx5/main.c128
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c493
-rw-r--r--drivers/vfio/pci/pds/dirty.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c5
-rw-r--r--drivers/vfio/pci/pds/pci_drv.c2
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c3
-rw-r--r--drivers/vfio/pci/qat/main.c12
-rw-r--r--drivers/vfio/pci/vfio_pci.c13
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c55
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c618
-rw-r--r--drivers/vfio/pci/vfio_pci_dmabuf.c350
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c5
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c73
-rw-r--r--drivers/vfio/pci/vfio_pci_priv.h34
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c148
-rw-r--r--drivers/vfio/pci/virtio/Kconfig42
-rw-r--r--drivers/vfio/pci/virtio/Makefile3
-rw-r--r--drivers/vfio/pci/virtio/common.h126
-rw-r--r--drivers/vfio/pci/virtio/legacy_io.c398
-rw-r--r--drivers/vfio/pci/virtio/main.c489
-rw-r--r--drivers/vfio/pci/virtio/migrate.c1336
-rw-r--r--drivers/vfio/pci/xe/Kconfig12
-rw-r--r--drivers/vfio/pci/xe/Makefile3
-rw-r--r--drivers/vfio/pci/xe/main.c573
-rw-r--r--drivers/vfio/platform/Kconfig5
-rw-r--r--drivers/vfio/platform/reset/Kconfig6
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c3
-rw-r--r--drivers/vfio/platform/vfio_platform.c3
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c50
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h3
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c13
-rw-r--r--drivers/vfio/vfio_iommu_type1.c495
-rw-r--r--drivers/vfio/vfio_main.c122
-rw-r--r--drivers/vfio/virqfd.c20
-rw-r--r--drivers/vhost/Kconfig19
-rw-r--r--drivers/vhost/net.c277
-rw-r--r--drivers/vhost/scsi.c915
-rw-r--r--drivers/vhost/test.c10
-rw-r--r--drivers/vhost/vdpa.c56
-rw-r--r--drivers/vhost/vhost.c713
-rw-r--r--drivers/vhost/vhost.h89
-rw-r--r--drivers/vhost/vringh.c152
-rw-r--r--drivers/vhost/vsock.c33
-rw-r--r--drivers/video/Kconfig19
-rw-r--r--drivers/video/aperture.c11
-rw-r--r--drivers/video/backlight/88pm860x_bl.c6
-rw-r--r--drivers/video/backlight/Kconfig36
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/aat2870_bl.c13
-rw-r--r--drivers/video/backlight/adp5520_bl.c3
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/adp8870_bl.c3
-rw-r--r--drivers/video/backlight/ams369fg06.c25
-rw-r--r--drivers/video/backlight/apple_dwi_bl.c124
-rw-r--r--drivers/video/backlight/as3711_bl.c2
-rw-r--r--drivers/video/backlight/aw99706.c471
-rw-r--r--drivers/video/backlight/backlight.c161
-rw-r--r--drivers/video/backlight/bd6107.c15
-rw-r--r--drivers/video/backlight/corgi_lcd.c23
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c4
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/gpio_backlight.c21
-rw-r--r--drivers/video/backlight/hp680_bl.c3
-rw-r--r--drivers/video/backlight/hx8357.c4
-rw-r--r--drivers/video/backlight/ili922x.c9
-rw-r--r--drivers/video/backlight/ili9320.c17
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c3
-rw-r--r--drivers/video/backlight/jornada720_bl.c4
-rw-r--r--drivers/video/backlight/jornada720_lcd.c12
-rw-r--r--drivers/video/backlight/kb3886_bl.c4
-rw-r--r--drivers/video/backlight/ktd253-backlight.c5
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c3
-rw-r--r--drivers/video/backlight/ktz8866.c5
-rw-r--r--drivers/video/backlight/l4f00242t03.c39
-rw-r--r--drivers/video/backlight/lcd.c115
-rw-r--r--drivers/video/backlight/led_bl.c29
-rw-r--r--drivers/video/backlight/lm3509_bl.c343
-rw-r--r--drivers/video/backlight/lm3533_bl.c5
-rw-r--r--drivers/video/backlight/lm3630a_bl.c2
-rw-r--r--drivers/video/backlight/lm3639_bl.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c4
-rw-r--r--drivers/video/backlight/lms501kf03.c26
-rw-r--r--drivers/video/backlight/locomolcd.c1
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/lp8788_bl.c153
-rw-r--r--drivers/video/backlight/ltv350qv.c17
-rw-r--r--drivers/video/backlight/lv5207lp.c15
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mp3309c.c27
-rw-r--r--drivers/video/backlight/mt6370-backlight.c2
-rw-r--r--drivers/video/backlight/omap1_bl.c47
-rw-r--r--drivers/video/backlight/otm3225a.c5
-rw-r--r--drivers/video/backlight/pandora_bl.c3
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c155
-rw-r--r--drivers/video/backlight/platform_lcd.c23
-rw-r--r--drivers/video/backlight/pwm_bl.c18
-rw-r--r--drivers/video/backlight/qcom-wled.c8
-rw-r--r--drivers/video/backlight/rave-sp-backlight.c4
-rw-r--r--drivers/video/backlight/rt4831-backlight.c4
-rw-r--r--drivers/video/backlight/sky81452-backlight.c12
-rw-r--r--drivers/video/backlight/tdo24m.c21
-rw-r--r--drivers/video/backlight/tps65217_bl.c1
-rw-r--r--drivers/video/backlight/vgg2432a4.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/console/Kconfig9
-rw-r--r--drivers/video/console/dummycon.c18
-rw-r--r--drivers/video/console/mdacon.c1
-rw-r--r--drivers/video/console/newport_con.c1
-rw-r--r--drivers/video/console/sticon.c1
-rw-r--r--drivers/video/console/vgacon.c1
-rw-r--r--drivers/video/fbdev/Kconfig58
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/amifb.c5
-rw-r--r--drivers/video/fbdev/arcfb.c2
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c7
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c10
-rw-r--r--drivers/video/fbdev/aty/mach64_accel.c2
-rw-r--r--drivers/video/fbdev/aty/mach64_cursor.c7
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c10
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c2
-rw-r--r--drivers/video/fbdev/au1100fb.c6
-rw-r--r--drivers/video/fbdev/au1200fb.c2
-rw-r--r--drivers/video/fbdev/broadsheetfb.c2
-rw-r--r--drivers/video/fbdev/bw2.c4
-rw-r--r--drivers/video/fbdev/c2p_iplan2.c3
-rw-r--r--drivers/video/fbdev/c2p_planar.c4
-rw-r--r--drivers/video/fbdev/carminefb.c8
-rw-r--r--drivers/video/fbdev/carminefb.h2
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c4
-rw-r--r--drivers/video/fbdev/cg6.c4
-rw-r--r--drivers/video/fbdev/chipsfb.c2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c33
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbdev/core/Kconfig32
-rw-r--r--drivers/video/fbdev/core/bitblit.c160
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c428
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c364
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c359
-rw-r--r--drivers/video/fbdev/core/cfbmem.h43
-rw-r--r--drivers/video/fbdev/core/fb_backlight.c18
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c2
-rw-r--r--drivers/video/fbdev/core/fb_copyarea.h405
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c1
-rw-r--r--drivers/video/fbdev/core/fb_defio.c105
-rw-r--r--drivers/video/fbdev/core/fb_draw.h274
-rw-r--r--drivers/video/fbdev/core/fb_fillrect.h279
-rw-r--r--drivers/video/fbdev/core/fb_imageblit.h495
-rw-r--r--drivers/video/fbdev/core/fb_info.c1
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c1
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c2
-rw-r--r--drivers/video/fbdev/core/fbcmap.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c666
-rw-r--r--drivers/video/fbdev/core/fbcon.h55
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c156
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c156
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.c47
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h18
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c172
-rw-r--r--drivers/video/fbdev/core/fbcvt.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c138
-rw-r--r--drivers/video/fbdev/core/fbmon.c12
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c77
-rw-r--r--drivers/video/fbdev/core/modedb.c1
-rw-r--r--drivers/video/fbdev/core/softcursor.c18
-rw-r--r--drivers/video/fbdev/core/svgalib.c96
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c371
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c326
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c335
-rw-r--r--drivers/video/fbdev/core/sysmem.h39
-rw-r--r--drivers/video/fbdev/core/tileblit.c77
-rw-r--r--drivers/video/fbdev/cyber2000fb.c36
-rw-r--r--drivers/video/fbdev/cyber2000fb.h2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c1665
-rw-r--r--drivers/video/fbdev/efifb.c31
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c2
-rw-r--r--drivers/video/fbdev/ffb.c4
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c7
-rw-r--r--drivers/video/fbdev/gbefb.c11
-rw-r--r--drivers/video/fbdev/geode/display_gx.c1
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c3
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c23
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c10
-rw-r--r--drivers/video/fbdev/geode/video_gx.c16
-rw-r--r--drivers/video/fbdev/goldfishfb.c3
-rw-r--r--drivers/video/fbdev/grvga.c2
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hecubafb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/hitfb.c2
-rw-r--r--drivers/video/fbdev/hpfb.c1
-rw-r--r--drivers/video/fbdev/hyperv_fb.c56
-rw-r--r--drivers/video/fbdev/i810/i810_main.c46
-rw-r--r--drivers/video/fbdev/imsttfb.c4
-rw-r--r--drivers/video/fbdev/imxfb.c45
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c25
-rw-r--r--drivers/video/fbdev/leo.c4
-rw-r--r--drivers/video/fbdev/macmodes.c4
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c26
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.c48
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c62
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c21
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c4
-rw-r--r--drivers/video/fbdev/metronomefb.c4
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c8
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c6
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c2
-rw-r--r--drivers/video/fbdev/nvidia/nv_hw.c8
-rw-r--r--drivers/video/fbdev/nvidia/nv_local.h2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c5
-rw-r--r--drivers/video/fbdev/ocfb.c2
-rw-r--r--drivers/video/fbdev/offb.c5
-rw-r--r--drivers/video/fbdev/omap/hwa742.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c10
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c5
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c40
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c13
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c27
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c55
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c90
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c22
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi.h2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c17
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/vrfb.c1
-rw-r--r--drivers/video/fbdev/p9100.c4
-rw-r--r--drivers/video/fbdev/platinumfb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c12
-rw-r--r--drivers/video/fbdev/pxafb.c58
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c2
-rw-r--r--drivers/video/fbdev/s3c-fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c177
-rw-r--r--drivers/video/fbdev/sbuslib.c3
-rw-r--r--drivers/video/fbdev/sbuslib.h2
-rw-r--r--drivers/video/fbdev/sh7760fb.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c48
-rw-r--r--drivers/video/fbdev/simplefb.c56
-rw-r--r--drivers/video/fbdev/sis/sis.h2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c27
-rw-r--r--drivers/video/fbdev/sm501fb.c14
-rw-r--r--drivers/video/fbdev/smscufx.c4
-rw-r--r--drivers/video/fbdev/ssd1307fb.c43
-rw-r--r--drivers/video/fbdev/sstfb.c9
-rw-r--r--drivers/video/fbdev/tcx.c6
-rw-r--r--drivers/video/fbdev/tridentfb.c4
-rw-r--r--drivers/video/fbdev/udlfb.c6
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/video/fbdev/valkyriefb.c2
-rw-r--r--drivers/video/fbdev/vesafb.c33
-rw-r--r--drivers/video/fbdev/vfb.c3
-rw-r--r--drivers/video/fbdev/vga16fb.c30
-rw-r--r--drivers/video/fbdev/via/chip.h8
-rw-r--r--drivers/video/fbdev/via/dvi.c24
-rw-r--r--drivers/video/fbdev/via/lcd.c6
-rw-r--r--drivers/video/fbdev/via/via-core.c1
-rw-r--r--drivers/video/fbdev/via/via-gpio.c11
-rw-r--r--drivers/video/fbdev/via/via_aux.h2
-rw-r--r--drivers/video/fbdev/via/via_i2c.c15
-rw-r--r--drivers/video/fbdev/via/viafbdev.c1
-rw-r--r--drivers/video/fbdev/via/vt1636.c6
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c2
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c33
-rw-r--r--drivers/video/fbdev/xen-fbfront.c3
-rw-r--r--drivers/video/fbdev/xilinxfb.c2
-rw-r--r--drivers/video/hdmi.c28
-rw-r--r--drivers/video/logo/Kconfig2
-rw-r--r--drivers/video/logo/pnmtologo.c4
-rw-r--r--drivers/video/screen_info_generic.c91
-rw-r--r--drivers/video/screen_info_pci.c79
-rw-r--r--drivers/virt/Kconfig4
-rw-r--r--drivers/virt/acrn/hsm.c6
-rw-r--r--drivers/virt/acrn/ioreq.c4
-rw-r--r--drivers/virt/acrn/irqfd.c19
-rw-r--r--drivers/virt/acrn/mm.c24
-rw-r--r--drivers/virt/coco/Kconfig15
-rw-r--r--drivers/virt/coco/Makefile5
-rw-r--r--drivers/virt/coco/arm-cca-guest/Kconfig10
-rw-r--r--drivers/virt/coco/arm-cca-guest/Makefile2
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c232
-rw-r--r--drivers/virt/coco/efi_secret/Kconfig2
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c49
-rw-r--r--drivers/virt/coco/guest/Kconfig17
-rw-r--r--drivers/virt/coco/guest/Makefile4
-rw-r--r--drivers/virt/coco/guest/report.c539
-rw-r--r--drivers/virt/coco/guest/tsm-mr.c251
-rw-r--r--drivers/virt/coco/pkvm-guest/Kconfig10
-rw-r--r--drivers/virt/coco/pkvm-guest/Makefile2
-rw-r--r--drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c123
-rw-r--r--drivers/virt/coco/sev-guest/Kconfig3
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c983
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.h63
-rw-r--r--drivers/virt/coco/tdx-guest/Kconfig1
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c288
-rw-r--r--drivers/virt/coco/tsm-core.c163
-rw-r--r--drivers/virt/coco/tsm.c425
-rw-r--r--drivers/virt/vboxguest/Kconfig3
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c4
-rw-r--r--drivers/virtio/Kconfig76
-rw-r--r--drivers/virtio/Makefile5
-rw-r--r--drivers/virtio/virtio.c278
-rw-r--r--drivers/virtio/virtio_balloon.c199
-rw-r--r--drivers/virtio/virtio_debug.c27
-rw-r--r--drivers/virtio/virtio_dma_buf.c5
-rw-r--r--drivers/virtio/virtio_input.c14
-rw-r--r--drivers/virtio/virtio_mem.c207
-rw-r--r--drivers/virtio/virtio_mmio.c68
-rw-r--r--drivers/virtio/virtio_pci_common.c283
-rw-r--r--drivers/virtio/virtio_pci_common.h39
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern.c606
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c73
-rw-r--r--drivers/virtio/virtio_ring.c1034
-rw-r--r--drivers/virtio/virtio_rtc_arm.c23
-rw-r--r--drivers/virtio/virtio_rtc_class.c262
-rw-r--r--drivers/virtio/virtio_rtc_driver.c1407
-rw-r--r--drivers/virtio/virtio_rtc_internal.h122
-rw-r--r--drivers/virtio/virtio_rtc_ptp.c347
-rw-r--r--drivers/virtio/virtio_vdpa.c93
-rw-r--r--drivers/w1/masters/amd_axi_w1.c2
-rw-r--r--drivers/w1/masters/ds2482.c30
-rw-r--r--drivers/w1/masters/matrox_w1.c10
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c7
-rw-r--r--drivers/w1/masters/sgi_w1.c2
-rw-r--r--drivers/w1/masters/w1-gpio.c64
-rw-r--r--drivers/w1/masters/w1-uart.c4
-rw-r--r--drivers/w1/slaves/w1_ds2406.c18
-rw-r--r--drivers/w1/slaves/w1_ds2408.c40
-rw-r--r--drivers/w1/slaves/w1_ds2413.c12
-rw-r--r--drivers/w1/slaves/w1_ds2430.c8
-rw-r--r--drivers/w1/slaves/w1_ds2431.c8
-rw-r--r--drivers/w1/slaves/w1_ds2433.c12
-rw-r--r--drivers/w1/slaves/w1_ds2438.c32
-rw-r--r--drivers/w1/slaves/w1_ds2780.c6
-rw-r--r--drivers/w1/slaves/w1_ds2781.c6
-rw-r--r--drivers/w1/slaves/w1_ds2805.c6
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c16
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c8
-rw-r--r--drivers/w1/slaves/w1_therm.c12
-rw-r--r--drivers/w1/w1.c32
-rw-r--r--drivers/w1/w1_int.c6
-rw-r--r--drivers/w1/w1_netlink.c42
-rw-r--r--drivers/watchdog/Kconfig122
-rw-r--r--drivers/watchdog/Makefile10
-rw-r--r--drivers/watchdog/acquirewdt.c3
-rw-r--r--drivers/watchdog/advantechwdt.c3
-rw-r--r--drivers/watchdog/airoha_wdt.c216
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c5
-rw-r--r--drivers/watchdog/apple_wdt.c15
-rw-r--r--drivers/watchdog/arm_smc_wdt.c17
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c10
-rw-r--r--drivers/watchdog/aspeed_wdt.c111
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c3
-rw-r--r--drivers/watchdog/at91sam9_wdt.c8
-rw-r--r--drivers/watchdog/ath79_wdt.c3
-rw-r--r--drivers/watchdog/bcm2835_wdt.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c6
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c2
-rw-r--r--drivers/watchdog/bd96801_wdt.c417
-rw-r--r--drivers/watchdog/cgbc_wdt.c211
-rw-r--r--drivers/watchdog/cpu5wdt.c285
-rw-r--r--drivers/watchdog/cpwd.c7
-rw-r--r--drivers/watchdog/cros_ec_wdt.c40
-rw-r--r--drivers/watchdog/da9052_wdt.c40
-rw-r--r--drivers/watchdog/da9055_wdt.c7
-rw-r--r--drivers/watchdog/da9063_wdt.c19
-rw-r--r--drivers/watchdog/diag288_wdt.c69
-rw-r--r--drivers/watchdog/dw_wdt.c4
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/exar_wdt.c2
-rw-r--r--drivers/watchdog/gef_wdt.c3
-rw-r--r--drivers/watchdog/geodewdt.c3
-rw-r--r--drivers/watchdog/gxp-wdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c60
-rw-r--r--drivers/watchdog/ib700wdt.c3
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c10
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c26
-rw-r--r--drivers/watchdog/imx_sc_wdt.c46
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c5
-rw-r--r--drivers/watchdog/intel_oc_wdt.c233
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/it87_wdt.c47
-rw-r--r--drivers/watchdog/lenovo_se10_wdt.c4
-rw-r--r--drivers/watchdog/lenovo_se30_wdt.c396
-rw-r--r--drivers/watchdog/loongson1_wdt.c89
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c9
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c5
-rw-r--r--drivers/watchdog/marvell_gti_wdt.c4
-rw-r--r--drivers/watchdog/max77620_wdt.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c3
-rw-r--r--drivers/watchdog/mixcomwd.c5
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c12
-rw-r--r--drivers/watchdog/mtx-1_wdt.c3
-rw-r--r--drivers/watchdog/nct6694_wdt.c307
-rw-r--r--drivers/watchdog/nic7018_wdt.c11
-rw-r--r--drivers/watchdog/npcm_wdt.c9
-rw-r--r--drivers/watchdog/nv_tco.c3
-rw-r--r--drivers/watchdog/octeon-wdt-main.c4
-rw-r--r--drivers/watchdog/omap_wdt.c3
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/pc87413_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c6
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c8
-rw-r--r--drivers/watchdog/pika_wdt.c3
-rw-r--r--drivers/watchdog/pm8916_wdt.c2
-rw-r--r--drivers/watchdog/pretimeout_noop.c2
-rw-r--r--drivers/watchdog/pretimeout_panic.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c7
-rw-r--r--drivers/watchdog/rc32434_wdt.c3
-rw-r--r--drivers/watchdog/rdc321x_wdt.c3
-rw-r--r--drivers/watchdog/renesas_wdt.c10
-rw-r--r--drivers/watchdog/renesas_wwdt.c163
-rw-r--r--drivers/watchdog/riowd.c3
-rw-r--r--drivers/watchdog/rti_wdt.c25
-rw-r--r--drivers/watchdog/rza_wdt.c7
-rw-r--r--drivers/watchdog/rzg2l_wdt.c137
-rw-r--r--drivers/watchdog/rzn1_wdt.c8
-rw-r--r--drivers/watchdog/rzv2h_wdt.c385
-rw-r--r--drivers/watchdog/s32g_wdt.c315
-rw-r--r--drivers/watchdog/s3c2410_wdt.c126
-rw-r--r--drivers/watchdog/sa1100_wdt.c5
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c5
-rw-r--r--drivers/watchdog/sbc7240_wdt.c1
-rw-r--r--drivers/watchdog/sbc8360.c1
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c1
-rw-r--r--drivers/watchdog/sbsa_gwdt.c50
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c3
-rw-r--r--drivers/watchdog/sch311x_wdt.c3
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c1
-rw-r--r--drivers/watchdog/sl28cpld_wdt.c4
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c3
-rw-r--r--drivers/watchdog/softdog.c8
-rw-r--r--drivers/watchdog/sp805_wdt.c3
-rw-r--r--drivers/watchdog/st_lpc_wdt.c2
-rw-r--r--drivers/watchdog/starfive-wdt.c12
-rw-r--r--drivers/watchdog/stm32_iwdg.c95
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c2
-rw-r--r--drivers/watchdog/sunxi_wdt.c11
-rw-r--r--drivers/watchdog/ts4800_wdt.c1
-rw-r--r--drivers/watchdog/ts72xx_wdt.c8
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c2
-rw-r--r--drivers/watchdog/via_wdt.c3
-rw-r--r--drivers/watchdog/visconti_wdt.c5
-rw-r--r--drivers/watchdog/w83877f_wdt.c5
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/watchdog_core.c32
-rw-r--r--drivers/watchdog/watchdog_core.h8
-rw-r--r--drivers/watchdog/watchdog_dev.c8
-rw-r--r--drivers/watchdog/watchdog_hrtimer_pretimeout.c4
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c2
-rw-r--r--drivers/watchdog/wdat_wdt.c64
-rw-r--r--drivers/watchdog/wdrtas.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c4
-rw-r--r--drivers/watchdog/xilinx_wwdt.c75
-rw-r--r--drivers/watchdog/ziirave_wdt.c7
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/acpi.c74
-rw-r--r--drivers/xen/balloon.c56
-rw-r--r--drivers/xen/events/events_base.c45
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntdev-common.h4
-rw-r--r--drivers/xen/gntdev-dmabuf.c37
-rw-r--r--drivers/xen/gntdev-dmabuf.h2
-rw-r--r--drivers/xen/gntdev.c109
-rw-r--r--drivers/xen/grant-dma-iommu.c2
-rw-r--r--drivers/xen/grant-dma-ops.c20
-rw-r--r--drivers/xen/grant-table.c8
-rw-r--r--drivers/xen/manage.c22
-rw-r--r--drivers/xen/mcelog.c1
-rw-r--r--drivers/xen/pci.c59
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/platform-pci.c4
-rw-r--r--drivers/xen/privcmd-buf.c1
-rw-r--r--drivers/xen/privcmd.c110
-rw-r--r--drivers/xen/pvcalls-back.c4
-rw-r--r--drivers/xen/pvcalls-front.c14
-rw-r--r--drivers/xen/pvcalls-front.h2
-rw-r--r--drivers/xen/swiotlb-xen.c95
-rw-r--r--drivers/xen/time.c8
-rw-r--r--drivers/xen/unpopulated-alloc.c4
-rw-r--r--drivers/xen/xen-acpi-processor.c12
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c116
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xenbus/Makefile14
-rw-r--r--drivers/xen/xenbus/xenbus.h4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c9
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c62
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c84
-rw-r--r--drivers/xen/xenfs/super.c2
-rw-r--r--drivers/xen/xenfs/xensyms.c4
-rw-r--r--drivers/zorro/names.c12
-rw-r--r--drivers/zorro/zorro-driver.c4
-rw-r--r--drivers/zorro/zorro-sysfs.c6
-rw-r--r--drivers/zorro/zorro.c3
23077 files changed, 2733868 insertions, 1014132 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7bdad836fc62..c0f1fb893ec0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -10,6 +10,12 @@ source "drivers/cxl/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/rapidio/Kconfig"
+config PC104
+ bool "PC/104 support" if EXPERT
+ help
+ Expose PC/104 form factor device drivers and options available for
+ selection and configuration. Enable this option if your target
+ machine has a PC/104 bus.
source "drivers/base/Kconfig"
@@ -21,6 +27,8 @@ source "drivers/connector/Kconfig"
source "drivers/firmware/Kconfig"
+source "drivers/fwctl/Kconfig"
+
source "drivers/gnss/Kconfig"
source "drivers/mtd/Kconfig"
@@ -75,6 +83,8 @@ source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
+source "drivers/dpll/Kconfig"
+
source "drivers/pinctrl/Kconfig"
source "drivers/gpio/Kconfig"
@@ -151,6 +161,8 @@ source "drivers/greybus/Kconfig"
source "drivers/comedi/Kconfig"
+source "drivers/gpib/Kconfig"
+
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
@@ -207,8 +219,6 @@ source "drivers/thunderbolt/Kconfig"
source "drivers/android/Kconfig"
-source "drivers/gpu/trace/Kconfig"
-
source "drivers/nvdimm/Kconfig"
source "drivers/dax/Kconfig"
@@ -243,6 +253,6 @@ source "drivers/hte/Kconfig"
source "drivers/cdx/Kconfig"
-source "drivers/dpll/Kconfig"
+source "drivers/resctrl/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index fe9ceb0d2288..ccc05f1eae3e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
+# LEDs must come before PCI; the LED subsystem is needed by the NPEM driver
+obj-y += leds/
+
obj-y += pci/
obj-$(CONFIG_PARISC) += parisc/
@@ -130,9 +133,9 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
-obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
+obj-$(CONFIG_FWCTL) += fwctl/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-y += clocksource/
@@ -147,6 +150,7 @@ obj-$(CONFIG_VHOST_IOTLB) += vhost/
obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_COMEDI) += comedi/
+obj-$(CONFIG_GPIB) += gpib/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
@@ -157,8 +161,8 @@ obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS) += virt/
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
+obj-y += virt/
+obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
@@ -191,5 +195,7 @@ obj-$(CONFIG_HTE) += hte/
obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
obj-$(CONFIG_DPLL) += dpll/
+obj-y += resctrl/
+obj-$(CONFIG_DIBS) += dibs/
obj-$(CONFIG_S390) += s390/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 64065fb8922b..bdf48ccafcf2 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -24,8 +24,11 @@ menuconfig DRM_ACCEL
different device files, called accel/accel* (in /dev, sysfs
and debugfs).
+source "drivers/accel/amdxdna/Kconfig"
+source "drivers/accel/ethosu/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
+source "drivers/accel/rocket/Kconfig"
endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index ab3df932937f..1d3a7251b950 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
+obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
+obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file
diff --git a/drivers/accel/amdxdna/Kconfig b/drivers/accel/amdxdna/Kconfig
new file mode 100644
index 000000000000..f39d7a87296c
--- /dev/null
+++ b/drivers/accel/amdxdna/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_AMDXDNA
+ tristate "AMD AI Engine"
+ depends on AMD_IOMMU
+ depends on DRM_ACCEL
+ depends on PCI && HAS_IOMEM
+ depends on X86_64
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ select FW_LOADER
+ select HMM_MIRROR
+ help
+ Choose this option to enable support for the NPU integrated into
+ AMD client CPUs such as the AMD Ryzen AI 300 Series. The AMD NPU
+ can be used to accelerate machine learning applications.
+
+ If "M" is selected, the driver module will be amdxdna.
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
new file mode 100644
index 000000000000..6344aaf523fa
--- /dev/null
+++ b/drivers/accel/amdxdna/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+amdxdna-y := \
+ aie2_ctx.o \
+ aie2_error.o \
+ aie2_message.o \
+ aie2_pci.o \
+ aie2_pm.o \
+ aie2_psp.o \
+ aie2_smu.o \
+ aie2_solver.o \
+ amdxdna_ctx.o \
+ amdxdna_gem.o \
+ amdxdna_mailbox.o \
+ amdxdna_mailbox_helper.o \
+ amdxdna_pci_drv.o \
+ amdxdna_pm.o \
+ amdxdna_sysfs.o \
+ amdxdna_ubuf.o \
+ npu1_regs.o \
+ npu2_regs.o \
+ npu4_regs.o \
+ npu5_regs.o \
+ npu6_regs.o
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) = amdxdna.o
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
new file mode 100644
index 000000000000..0e4bbebeaedf
--- /dev/null
+++ b/drivers/accel/amdxdna/TODO
@@ -0,0 +1 @@
+- Add debugfs support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
new file mode 100644
index 000000000000..42d876a427c5
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -0,0 +1,1079 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_syncobj.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
+
+static bool force_cmdlist;
+module_param(force_cmdlist, bool, 0600);
+MODULE_PARM_DESC(force_cmdlist, "Force use of command lists (default: false)");
+
+#define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
+
+static void aie2_job_release(struct kref *ref)
+{
+ struct amdxdna_sched_job *job;
+
+ job = container_of(ref, struct amdxdna_sched_job, refcnt);
+ amdxdna_sched_job_cleanup(job);
+ atomic64_inc(&job->hwctx->job_free_cnt);
+ wake_up(&job->hwctx->priv->job_free_wq);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+ kfree(job);
+}
+
+static void aie2_job_put(struct amdxdna_sched_job *job)
+{
+ kref_put(&job->refcnt, aie2_job_release);
+}
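
The pair above is the standard kernel kref idiom: every user of the object
holds a reference, and the release callback runs exactly once, when the last
kref_put() drops the count to zero. A minimal self-contained sketch of the
same pattern (struct demo_job and its helpers are illustrative only, not part
of this driver):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_job {
	struct kref refcnt;
	/* payload ... */
};

static void demo_job_release(struct kref *ref)
{
	/* Runs exactly once, when the final reference is dropped. */
	struct demo_job *job = container_of(ref, struct demo_job, refcnt);

	kfree(job);
}

static struct demo_job *demo_job_create(void)
{
	struct demo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (job)
		kref_init(&job->refcnt);	/* count starts at 1 */
	return job;
}

static void demo_job_get(struct demo_job *job)
{
	kref_get(&job->refcnt);		/* one get per additional holder */
}

static void demo_job_put(struct demo_job *job)
{
	kref_put(&job->refcnt, demo_job_release);
}
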
+
+static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->status = hwctx->old_status;
+}
+
+/* bad_job is only used by aie2_sched_job_timedout(); all other callers pass NULL */
+static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
+ struct drm_sched_job *bad_job)
+{
+ drm_sched_stop(&hwctx->priv->sched, bad_job);
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+}
+
+static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_gem_obj *heap = hwctx->priv->heap;
+ int ret;
+
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
+ goto out;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
+ goto out;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
+ goto out;
+ }
+
+ ret = aie2_config_cu(hwctx, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
+ goto out;
+ }
+
+out:
+ drm_sched_start(&hwctx->priv->sched, 0);
+ XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
+ return ret;
+}
+
+static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *fence, *out_fence = NULL;
+ int ret;
+
+ fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
+ if (!fence)
+ return NULL;
+
+ ret = dma_fence_chain_find_seqno(&fence, seq);
+ if (ret)
+ goto out;
+
+ out_fence = dma_fence_get(dma_fence_chain_contained(fence));
+
+out:
+ dma_fence_put(fence);
+ return out_fence;
+}
+
+static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
+{
+ struct dma_fence *fence;
+
+ fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
+ if (!fence)
+ return;
+
+ /* Wait up to 2 seconds for fw to finish all pending requests */
+ dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
+ dma_fence_put(fence);
+}
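
Together with aie2_cmd_get_out_fence() above, this is the general recipe for
blocking on one particular submission: resolve its sequence number to a fence,
wait with a bound, and drop the fence reference. A sketch of a hypothetical
caller built on the same two calls (demo_wait_for_seq() is illustrative, not
part of this driver):

static int demo_wait_for_seq(struct amdxdna_hwctx *hwctx, u64 seq)
{
	struct dma_fence *fence;
	long remaining;

	fence = aie2_cmd_get_out_fence(hwctx, seq);
	if (!fence)
		return -ENOENT;

	/* Bounded, non-interruptible wait, mirroring the idle path above. */
	remaining = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
	dma_fence_put(fence);

	if (remaining < 0)
		return remaining;	/* the wait itself failed */
	return remaining ? 0 : -ETIME;	/* zero jiffies left means timeout */
}
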
+
+static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ aie2_hwctx_status_shift_stop(hwctx);
+
+ return 0;
+}
+
+void aie2_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
+ /*
+ * A command timeout is unlikely, but if it happens it doesn't
+ * break the system: aie2_hwctx_stop() destroys the mailbox
+ * and aborts all commands.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
+}
+
+static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ aie2_hwctx_status_restore(hwctx);
+ return aie2_hwctx_restart(xdna, hwctx);
+}
+
+int aie2_hwctx_resume(struct amdxdna_client *client)
+{
+ /*
+ * The resume path cannot guarantee that the mailbox channel is
+ * regenerated. If that happens, submitting a message to the
+ * channel will return an error.
+ */
+ return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
+}
+
+static void
+aie2_sched_notify(struct amdxdna_sched_job *job)
+{
+ struct dma_fence *fence = job->fence;
+
+ trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+
+ amdxdna_pm_suspend_put(job->hwctx->client->xdna);
+ job->hwctx->priv->completed++;
+ dma_fence_signal(fence);
+
+ up(&job->hwctx->priv->job_sem);
+ job->job_done = true;
+ mmput_async(job->mm);
+ aie2_job_put(job);
+}
+
+static int
+aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ int ret = 0;
+ u32 status;
+
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(job->job_timeout)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(!data) || unlikely(size != sizeof(u32))) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = readl(data);
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ if (status == AIE2_STATUS_SUCCESS)
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ else
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ int ret = 0;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ job->drv_cmd->result = readl(data);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ struct amdxdna_dev *xdna;
+ u32 fail_cmd_status;
+ u32 fail_cmd_idx;
+ u32 cmd_status;
+ int ret = 0;
+
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(job->job_timeout)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
+ xdna = job->hwctx->client->xdna;
+ XDNA_DBG(xdna, "Status 0x%x", cmd_status);
+ if (cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ goto out;
+ }
+
+ /* Slow path to handle errors: read details from the ring buffer on the BAR */
+ fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
+ fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
+ XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
+ fail_cmd_idx, fail_cmd_status);
+
+ if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+ amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
+ struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
+
+ cc->error_index = fail_cmd_idx;
+ if (cc->error_index >= cc->command_count)
+ cc->error_index = 0;
+ }
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static struct dma_fence *
+aie2_sched_job_run(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct dma_fence *fence;
+ int ret;
+
+ if (!mmget_not_zero(job->mm))
+ return ERR_PTR(-ESRCH);
+
+ kref_get(&job->refcnt);
+ fence = dma_fence_get(job->fence);
+
+ if (job->drv_cmd) {
+ switch (job->drv_cmd->opcode) {
+ case SYNC_DEBUG_BO:
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ case ATTACH_DEBUG_BO:
+ ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ goto out;
+ }
+
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
+ ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else if (force_cmdlist)
+ ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else
+ ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
+
+out:
+ if (ret) {
+ dma_fence_put(job->fence);
+ aie2_job_put(job);
+ mmput(job->mm);
+ fence = ERR_PTR(ret);
+ }
+ trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
+
+ return fence;
+}
+
+static void aie2_sched_job_free(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+
+ trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
+ if (!job->job_done)
+ up(&hwctx->priv->job_sem);
+
+ drm_sched_job_cleanup(sched_job);
+ aie2_job_put(job);
+}
+
+static enum drm_gpu_sched_stat
+aie2_sched_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct amdxdna_dev *xdna;
+
+ xdna = hwctx->client->xdna;
+ trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
+ job->job_timeout = true;
+ mutex_lock(&xdna->dev_lock);
+ aie2_hwctx_stop(xdna, hwctx, sched_job);
+
+ aie2_hwctx_restart(xdna, hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static const struct drm_sched_backend_ops sched_ops = {
+ .run_job = aie2_sched_job_run,
+ .free_job = aie2_sched_job_free,
+ .timedout_job = aie2_sched_job_timedout,
+};
+
+static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int start, end, first, last;
+ u32 width = 1, entries = 0;
+ int i;
+
+ if (!hwctx->num_tiles) {
+ XDNA_ERR(xdna, "Number of tiles is zero");
+ return -EINVAL;
+ }
+
+ ndev = xdna->dev_handle;
+ if (unlikely(!ndev->metadata.core.row_count)) {
+ XDNA_WARN(xdna, "Core tile row count is zero");
+ return -EINVAL;
+ }
+
+ hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
+ if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
+ XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
+ return -EINVAL;
+ }
+
+ if (ndev->priv->col_align == COL_ALIGN_NATURE)
+ width = hwctx->num_col;
+
+ /*
+ * Within [start, end], find the columns that are multiples of width.
+ * 'first' is the first such column,
+ * 'last' is the last such column,
+ * 'entries' is the total number of such columns.
+ */
+ start = xdna->dev_info->first_col;
+ end = ndev->total_col - hwctx->num_col;
+ if (start > 0 && end == 0) {
+ XDNA_DBG(xdna, "Force start from col 0");
+ start = 0;
+ }
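+ /* Round start up and end down to the nearest multiple of width */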
+ first = start + (width - start % width) % width;
+ last = end - end % width;
+ if (last >= first)
+ entries = (last - first) / width + 1;
+ XDNA_DBG(xdna, "start %d end %d first %d last %d",
+ start, end, first, last);
+
+ if (unlikely(!entries)) {
+ XDNA_ERR(xdna, "Start %d end %d width %d",
+ start, end, width);
+ return -EINVAL;
+ }
+
+ hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
+ if (!hwctx->col_list)
+ return -ENOMEM;
+
+ hwctx->col_list_len = entries;
+ hwctx->col_list[0] = first;
+ for (i = 1; i < entries; i++)
+ hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
+
+ print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
+ entries * sizeof(*hwctx->col_list), false);
+ return 0;
+}
+
+static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct alloc_requests *xrs_req;
+ int ret;
+
+ xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
+ if (!xrs_req)
+ return -ENOMEM;
+
+ xrs_req->cdo.start_cols = hwctx->col_list;
+ xrs_req->cdo.cols_len = hwctx->col_list_len;
+ xrs_req->cdo.ncols = hwctx->num_col;
+ xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
+
+ xrs_req->rqos.gops = hwctx->qos.gops;
+ xrs_req->rqos.fps = hwctx->qos.fps;
+ xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
+ xrs_req->rqos.latency = hwctx->qos.latency;
+ xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
+ xrs_req->rqos.priority = hwctx->qos.priority;
+
+ xrs_req->rid = (uintptr_t)hwctx;
+
+ ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
+
+ kfree(xrs_req);
+ return ret;
+}
+
+static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ int ret;
+
+ ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
+}
+
+static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct drm_file *filp = hwctx->client->filp;
+ struct drm_syncobj *syncobj;
+ u32 hdl;
+ int ret;
+
+ hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
+
+ ret = drm_syncobj_create(&syncobj, 0, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
+ return ret;
+ }
+ ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
+ if (ret) {
+ drm_syncobj_put(syncobj);
+ XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
+ return ret;
+ }
+ hwctx->priv->syncobj = syncobj;
+ hwctx->syncobj_hdl = hdl;
+
+ return 0;
+}
+
+static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
+{
+ /*
+ * The syncobj_hdl is owned by user space and will be cleaned up
+ * separately.
+ */
+ drm_syncobj_put(hwctx->priv->syncobj);
+}
+
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = HWCTX_MAX_CMDS,
+ .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ .name = "amdxdna_js",
+ .dev = xdna->ddev.dev,
+ };
+ struct drm_gpu_scheduler *sched;
+ struct amdxdna_hwctx_priv *priv;
+ struct amdxdna_gem_obj *heap;
+ int i, ret;
+
+ priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ hwctx->priv = priv;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+ XDNA_ERR(xdna, "The client dev heap object not exist");
+ mutex_unlock(&client->mm_lock);
+ ret = -ENOENT;
+ goto free_priv;
+ }
+ drm_gem_object_get(to_gobj(heap));
+ mutex_unlock(&client->mm_lock);
+ priv->heap = heap;
+ sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
+
+ ret = amdxdna_gem_pin(heap);
+ if (ret) {
+ XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
+ goto put_heap;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ struct amdxdna_gem_obj *abo;
+ struct amdxdna_drm_create_bo args = {
+ .flags = 0,
+ .type = AMDXDNA_BO_DEV,
+ .vaddr = 0,
+ .size = MAX_CHAIN_CMDBUF_SIZE,
+ };
+
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto free_cmd_bufs;
+ }
+
+ XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
+ i, abo->mem.dev_addr, abo->mem.size);
+ priv->cmd_buf[i] = abo;
+ }
+
+ sched = &priv->sched;
+ mutex_init(&priv->io_lock);
+
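+ /*
+ * Record an fs_reclaim -> io_lock dependency with lockdep, so it warns
+ * if memory is ever allocated while io_lock is held.
+ */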
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&priv->io_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ ret = drm_sched_init(sched, &args);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
+ goto free_cmd_bufs;
+ }
+
+ ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to initial sched entiry. ret %d", ret);
+ goto free_sched;
+ }
+
+ ret = aie2_hwctx_col_list(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
+ goto free_entity;
+ }
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_col_list;
+
+ ret = aie2_alloc_resource(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
+ goto suspend_put;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ ret = aie2_ctx_syncobj_create(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
+ goto release_resource;
+ }
+ amdxdna_pm_suspend_put(xdna);
+
+ hwctx->status = HWCTX_STAT_INIT;
+ init_waitqueue_head(&priv->job_free_wq);
+
+ XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
+
+ return 0;
+
+release_resource:
+ aie2_release_resource(hwctx);
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
+free_col_list:
+ kfree(hwctx->col_list);
+free_entity:
+ drm_sched_entity_destroy(&priv->entity);
+free_sched:
+ drm_sched_fini(&priv->sched);
+free_cmd_bufs:
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ if (!priv->cmd_buf[i])
+ continue;
+ drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
+ }
+ amdxdna_gem_unpin(heap);
+put_heap:
+ drm_gem_object_put(to_gobj(heap));
+free_priv:
+ kfree(priv);
+ return ret;
+}
+
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna;
+ int idx;
+
+ xdna = hwctx->client->xdna;
+
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+ aie2_hwctx_wait_for_idle(hwctx);
+
+ /* Request fw to destroy hwctx and cancel the remaining pending requests */
+ aie2_release_resource(hwctx);
+
+ mutex_unlock(&xdna->dev_lock);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
+ /* Wait for all submitted jobs to be completed or canceled */
+ wait_event(hwctx->priv->job_free_wq,
+ atomic64_read(&hwctx->job_submit_cnt) ==
+ atomic64_read(&hwctx->job_free_cnt));
+ mutex_lock(&xdna->dev_lock);
+
+ drm_sched_fini(&hwctx->priv->sched);
+ aie2_ctx_syncobj_destroy(hwctx);
+
+ for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
+ drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
+ amdxdna_gem_unpin(hwctx->priv->heap);
+ drm_gem_object_put(to_gobj(hwctx->priv->heap));
+
+ mutex_destroy(&hwctx->priv->io_lock);
+ kfree(hwctx->col_list);
+ kfree(hwctx->priv);
+ kfree(hwctx->cus);
+}
+
+static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_hwctx *hwctx = handle;
+
+ amdxdna_pm_suspend_put(hwctx->client->xdna);
+ return 0;
+}
+
+static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
+{
+ struct amdxdna_hwctx_param_config_cu *config = buf;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 total_size;
+ int ret;
+
+ XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
+ if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
+ return -EINVAL;
+
+ if (hwctx->status != HWCTX_STAT_INIT) {
+ XDNA_ERR(xdna, "Not support re-config CU");
+ return -EINVAL;
+ }
+
+ if (!config->num_cus) {
+ XDNA_ERR(xdna, "Number of CU is zero");
+ return -EINVAL;
+ }
+
+ total_size = struct_size(config, cu_configs, config->num_cus);
+ if (total_size > size) {
+ XDNA_ERR(xdna, "CU config larger than size");
+ return -EINVAL;
+ }
+
+ hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
+ if (!hwctx->cus)
+ return -ENOMEM;
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_cus;
+
+ ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
+ if (ret) {
+ XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
+ goto pm_suspend_put;
+ }
+
+ wmb(); /* Avoid locking in command submit when checking status */
+ hwctx->status = HWCTX_STAT_READY;
+
+ return 0;
+
+pm_suspend_put:
+ amdxdna_pm_suspend_put(xdna);
+free_cus:
+ kfree(hwctx->cus);
+ hwctx->cus = NULL;
+ return ret;
+}
+
+static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);
+
+ if (!out_fence) {
+ XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
+ return;
+ }
+
+ dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(out_fence);
+}
+
+static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
+ bool attach)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ struct amdxdna_gem_obj *abo;
+ u64 seq;
+ int ret;
+
+ abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
+ if (!abo) {
+ XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
+ return -EINVAL;
+ }
+
+ if (attach) {
+ if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
+ ret = -EBUSY;
+ goto put_obj;
+ }
+ cmd.opcode = ATTACH_DEBUG_BO;
+ } else {
+ if (abo->assigned_hwctx != hwctx->id) {
+ ret = -EINVAL;
+ goto put_obj;
+ }
+ cmd.opcode = DETACH_DEBUG_BO;
+ }
+
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ goto put_obj;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ goto put_obj;
+ }
+
+ if (attach)
+ abo->assigned_hwctx = hwctx->id;
+ else
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+
+ XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);
+
+put_obj:
+ amdxdna_gem_put_obj(abo);
+ return ret;
+}
+
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ switch (type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ return aie2_hwctx_cu_config(hwctx, buf, size);
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
+ default:
+ XDNA_DBG(xdna, "Not supported type %d", type);
+ return -EOPNOTSUPP;
+ }
+}
+
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ u64 seq;
+ int ret;
+
+ cmd.opcode = SYNC_DEBUG_BO;
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &debug_bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ return ret;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aie2_populate_range(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+ unsigned long timeout;
+ struct mm_struct *mm;
+ bool found;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
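+ /* Retry until no mapping on the list is still marked invalid, or the timeout expires */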
+again:
+ found = false;
+ down_write(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (mapp->invalid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ abo->mem.map_invalid = false;
+ up_write(&xdna->notifier_lock);
+ return 0;
+ }
+ kref_get(&mapp->refcnt);
+ up_write(&xdna->notifier_lock);
+
+ XDNA_DBG(xdna, "populate memory range %lx %lx",
+ mapp->vma->vm_start, mapp->vma->vm_end);
+ mm = mapp->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ amdxdna_umap_put(mapp);
+ return -EFAULT;
+ }
+
+ mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(&mapp->range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto put_mm;
+ }
+
+ if (ret == -EBUSY) {
+ amdxdna_umap_put(mapp);
+ goto again;
+ }
+
+ goto put_mm;
+ }
+
+ down_write(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
+ }
+ mapp->invalid = false;
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
+
+put_mm:
+ amdxdna_umap_put(mapp);
+ mmput(mm);
+ return ret;
+}
+
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct ww_acquire_ctx acquire_ctx;
+ struct dma_fence_chain *chain;
+ struct amdxdna_gem_obj *abo;
+ unsigned long timeout = 0;
+ int ret, i;
+
+ ret = down_interruptible(&hwctx->priv->job_sem);
+ if (ret) {
+ XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
+ return ret;
+ }
+
+ chain = dma_fence_chain_alloc();
+ if (!chain) {
+ XDNA_ERR(xdna, "Alloc fence chain failed");
+ ret = -ENOMEM;
+ goto up_sem;
+ }
+
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
+ hwctx->client->filp->client_id);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
+ goto free_chain;
+ }
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto cleanup_job;
+
+retry:
+ ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
+ goto suspend_put;
+ }
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ goto suspend_put;
+ }
+ }
+
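+ /* If any BO was invalidated by an MMU notifier, drop the locks, repopulate and retry */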
+ down_read(&xdna->notifier_lock);
+ for (i = 0; i < job->bo_cnt; i++) {
+ abo = to_xdna_obj(job->bos[i]);
+ if (abo->mem.map_invalid) {
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (!timeout) {
+ timeout = jiffies +
+ msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ } else if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto suspend_put;
+ }
+
+ ret = aie2_populate_range(abo);
+ if (ret)
+ goto suspend_put;
+ goto retry;
+ }
+ }
+
+ mutex_lock(&hwctx->priv->io_lock);
+ drm_sched_job_arm(&job->base);
+ job->out_fence = dma_fence_get(&job->base.s_fence->finished);
+ for (i = 0; i < job->bo_cnt; i++)
+ dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
+ job->seq = hwctx->priv->seq++;
+ kref_get(&job->refcnt);
+ drm_sched_entity_push_job(&job->base);
+
+ *seq = job->seq;
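+ /* Publish the fence at point 'seq' so waiters can find it through the syncobj chain */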
+ drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
+ mutex_unlock(&hwctx->priv->io_lock);
+
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+
+ aie2_job_put(job);
+ atomic64_inc(&hwctx->job_submit_cnt);
+
+ return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
+cleanup_job:
+ drm_sched_job_cleanup(&job->base);
+free_chain:
+ dma_fence_chain_free(chain);
+up_sem:
+ up(&hwctx->priv->job_sem);
+ job->job_done = true;
+ return ret;
+}
+
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
+ unsigned long cur_seq)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct drm_gem_object *gobj = to_gobj(abo);
+ long ret;
+
+ ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (!ret || ret == -ERESTARTSYS)
+ XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
new file mode 100644
index 000000000000..d452008ec4f4
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_error.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+struct async_event {
+ struct amdxdna_dev_hdl *ndev;
+ struct async_event_msg_resp resp;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+};
+
+struct async_events {
+ struct workqueue_struct *wq;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+ u32 event_cnt;
+ struct async_event event[] __counted_by(event_cnt);
+};
+
+/*
+ * The enum, struct and lookup tables below are ported from the XAIE util
+ * header file.
+ *
+ * This data is defined by the AIE device and is used to decode error
+ * messages from the device.
+ */
+
+enum aie_module_type {
+ AIE_MEM_MOD = 0,
+ AIE_CORE_MOD,
+ AIE_PL_MOD,
+ AIE_UNKNOWN_MOD,
+};
+
+enum aie_error_category {
+ AIE_ERROR_SATURATION = 0,
+ AIE_ERROR_FP,
+ AIE_ERROR_STREAM,
+ AIE_ERROR_ACCESS,
+ AIE_ERROR_BUS,
+ AIE_ERROR_INSTRUCTION,
+ AIE_ERROR_ECC,
+ AIE_ERROR_LOCK,
+ AIE_ERROR_DMA,
+ AIE_ERROR_MEM_PARITY,
+ /* Unknown is not from XAIE; added for better categorization */
+ AIE_ERROR_UNKNOWN,
+};
+
+/* Don't pack, unless the XAIE side changes */
+struct aie_error {
+ __u8 row;
+ __u8 col;
+ __u32 mod_type;
+ __u8 event_id;
+};
+
+struct aie_err_info {
+ u32 err_cnt;
+ u32 ret_code;
+ u32 rsvd;
+ struct aie_error payload[] __counted_by(err_cnt);
+};
+
+struct aie_event_category {
+ u8 event_id;
+ enum aie_error_category category;
+};
+
+#define EVENT_CATEGORY(id, cat) { id, cat }
+static const struct aie_event_category aie_ml_mem_event_cat[] = {
+ EVENT_CATEGORY(88U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(90U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(91U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(92U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(93U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(94U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(95U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(96U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(97U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(98U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(99U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(100U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(101U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_core_event_cat[] = {
+ EVENT_CATEGORY(55U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(56U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(57U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(58U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(59U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(60U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(62U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(64U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(65U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(66U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(67U, AIE_ERROR_LOCK),
+ EVENT_CATEGORY(70U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(71U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(72U, AIE_ERROR_BUS),
+};
+
+static const struct aie_event_category aie_ml_mem_tile_event_cat[] = {
+ EVENT_CATEGORY(130U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(132U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(133U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(134U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(135U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(136U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(137U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(138U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(139U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
+ EVENT_CATEGORY(64U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(65U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(66U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(67U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(68U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(69U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(70U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(71U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(72U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(73U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
+};
+
+static const enum amdxdna_error_num aie_cat_err_num_map[] = {
+ [AIE_ERROR_SATURATION] = AMDXDNA_ERROR_NUM_AIE_SATURATION,
+ [AIE_ERROR_FP] = AMDXDNA_ERROR_NUM_AIE_FP,
+ [AIE_ERROR_STREAM] = AMDXDNA_ERROR_NUM_AIE_STREAM,
+ [AIE_ERROR_ACCESS] = AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ [AIE_ERROR_BUS] = AMDXDNA_ERROR_NUM_AIE_BUS,
+ [AIE_ERROR_INSTRUCTION] = AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ [AIE_ERROR_ECC] = AMDXDNA_ERROR_NUM_AIE_ECC,
+ [AIE_ERROR_LOCK] = AMDXDNA_ERROR_NUM_AIE_LOCK,
+ [AIE_ERROR_DMA] = AMDXDNA_ERROR_NUM_AIE_DMA,
+ [AIE_ERROR_MEM_PARITY] = AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ [AIE_ERROR_UNKNOWN] = AMDXDNA_ERROR_NUM_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_cat_err_num_map) == AIE_ERROR_UNKNOWN + 1);
+
+static const enum amdxdna_error_module aie_err_mod_map[] = {
+ [AIE_MEM_MOD] = AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ [AIE_CORE_MOD] = AMDXDNA_ERROR_MODULE_AIE_CORE,
+ [AIE_PL_MOD] = AMDXDNA_ERROR_MODULE_AIE_PL,
+ [AIE_UNKNOWN_MOD] = AMDXDNA_ERROR_MODULE_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_err_mod_map) == AIE_UNKNOWN_MOD + 1);
+
+static enum aie_error_category
+aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
+{
+ const struct aie_event_category *lut;
+ int num_entry;
+ int i;
+
+ switch (mod_type) {
+ case AIE_PL_MOD:
+ lut = aie_ml_shim_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_shim_tile_event_cat);
+ break;
+ case AIE_CORE_MOD:
+ lut = aie_ml_core_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_core_event_cat);
+ break;
+ case AIE_MEM_MOD:
+ if (row == 1) {
+ lut = aie_ml_mem_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_tile_event_cat);
+ } else {
+ lut = aie_ml_mem_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_event_cat);
+ }
+ break;
+ default:
+ return AIE_ERROR_UNKNOWN;
+ }
+
+ for (i = 0; i < num_entry; i++) {
+ if (event_id != lut[i].event_id)
+ continue;
+
+ if (lut[i].category > AIE_ERROR_UNKNOWN)
+ return AIE_ERROR_UNKNOWN;
+
+ return lut[i].category;
+ }
+
+ return AIE_ERROR_UNKNOWN;
+}
+
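+/* Cache the most recent async error; user space retrieves it via aie2_get_array_async_error() */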
+static void aie2_update_last_async_error(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ enum amdxdna_error_module err_mod;
+ enum aie_error_category aie_err;
+ enum amdxdna_error_num err_num;
+ struct aie_error *last_err;
+
+ last_err = &errs[num_err - 1];
+ if (last_err->mod_type >= AIE_UNKNOWN_MOD) {
+ err_num = aie_cat_err_num_map[AIE_ERROR_UNKNOWN];
+ err_mod = aie_err_mod_map[AIE_UNKNOWN_MOD];
+ } else {
+ aie_err = aie_get_error_category(last_err->row,
+ last_err->event_id,
+ last_err->mod_type);
+ err_num = aie_cat_err_num_map[aie_err];
+ err_mod = aie_err_mod_map[last_err->mod_type];
+ }
+
+ ndev->last_async_err.err_code = AMDXDNA_ERROR_ENCODE(err_num, err_mod);
+ ndev->last_async_err.ts_us = ktime_to_us(ktime_get_real());
+ ndev->last_async_err.ex_err_code = AMDXDNA_EXTRA_ERR_ENCODE(last_err->row, last_err->col);
+}
+
+static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ u32 err_col = 0; /* assume that AIE has fewer than 32 columns */
+ int i;
+
+ /* Get err column bitmap */
+ for (i = 0; i < num_err; i++) {
+ struct aie_error *err = &errs[i];
+ enum aie_error_category cat;
+
+ cat = aie_get_error_category(err->row, err->event_id, err->mod_type);
+ XDNA_ERR(ndev->xdna, "Row: %d, Col: %d, module %d, event ID %d, category %d",
+ err->row, err->col, err->mod_type,
+ err->event_id, cat);
+
+ if (err->col >= 32) {
+ XDNA_WARN(ndev->xdna, "Invalid column number");
+ break;
+ }
+
+ err_col |= (1 << err->col);
+ }
+
+ return err_col;
+}
+
+static int aie2_error_async_cb(void *handle, void __iomem *data, size_t size)
+{
+ struct async_event *e = handle;
+
+ if (data) {
+ e->resp.type = readl(data + offsetof(struct async_event_msg_resp, type));
+ wmb(); /* Update status last, so no locking is needed here */
+ e->resp.status = readl(data + offsetof(struct async_event_msg_resp, status));
+ }
+ queue_work(e->wq, &e->work);
+ return 0;
+}
+
+static int aie2_error_event_send(struct async_event *e)
+{
+ drm_clflush_virt_range(e->buf, e->size); /* device can access */
+ return aie2_register_asyn_event_msg(e->ndev, e->addr, e->size, e,
+ aie2_error_async_cb);
+}
+
+static void aie2_error_worker(struct work_struct *err_work)
+{
+ struct aie_err_info *info;
+ struct amdxdna_dev *xdna;
+ struct async_event *e;
+ u32 max_err;
+ u32 err_col;
+
+ e = container_of(err_work, struct async_event, work);
+
+ xdna = e->ndev->xdna;
+
+ if (e->resp.status == MAX_AIE2_STATUS_CODE)
+ return;
+
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+
+ print_hex_dump_debug("AIE error: ", DUMP_PREFIX_OFFSET, 16, 4,
+ e->buf, 0x100, false);
+
+ info = (struct aie_err_info *)e->buf;
+ XDNA_DBG(xdna, "Error count %d return code %d", info->err_cnt, info->ret_code);
+
+ max_err = (e->size - sizeof(*info)) / sizeof(struct aie_error);
+ if (unlikely(info->err_cnt > max_err)) {
+ WARN_ONCE(1, "Error count too large %d\n", info->err_cnt);
+ return;
+ }
+ err_col = aie2_error_backtrack(e->ndev, info->payload, info->err_cnt);
+ if (!err_col) {
+ XDNA_WARN(xdna, "Did not get error column");
+ return;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ aie2_update_last_async_error(e->ndev, info->payload, info->err_cnt);
+
+ /* Re-send this event to firmware */
+ if (aie2_error_event_send(e))
+ XDNA_WARN(xdna, "Unable to register async event");
+ mutex_unlock(&xdna->dev_lock);
+}
+
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_events *events;
+
+ events = ndev->async_events;
+
+ mutex_unlock(&xdna->dev_lock);
+ destroy_workqueue(events->wq);
+ mutex_lock(&xdna->dev_lock);
+
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+ kfree(events);
+}
+
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 total_col = ndev->total_col;
+ u32 total_size = ASYNC_BUF_SIZE * total_col;
+ struct async_events *events;
+ int i, ret;
+
+ events = kzalloc(struct_size(events, event, total_col), GFP_KERNEL);
+ if (!events)
+ return -ENOMEM;
+
+ events->buf = dma_alloc_noncoherent(xdna->ddev.dev, total_size, &events->addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!events->buf) {
+ ret = -ENOMEM;
+ goto free_events;
+ }
+ events->size = total_size;
+ events->event_cnt = total_col;
+
+ events->wq = alloc_ordered_workqueue("async_wq", 0);
+ if (!events->wq) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
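+ /* Carve a per-column slice out of the DMA buffer and register each slice with firmware */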
+ for (i = 0; i < events->event_cnt; i++) {
+ struct async_event *e = &events->event[i];
+ u32 offset = i * ASYNC_BUF_SIZE;
+
+ e->ndev = ndev;
+ e->wq = events->wq;
+ e->buf = &events->buf[offset];
+ e->addr = events->addr + offset;
+ e->size = ASYNC_BUF_SIZE;
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+ INIT_WORK(&e->work, aie2_error_worker);
+
+ ret = aie2_error_event_send(e);
+ if (ret)
+ goto free_wq;
+ }
+
+ ndev->async_events = events;
+
+ XDNA_DBG(xdna, "Async event count %d, buf total size 0x%x",
+ events->event_cnt, events->size);
+ return 0;
+
+free_wq:
+ destroy_workqueue(events->wq);
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+free_events:
+ kfree(events);
+ return ret;
+}
+
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ args->num_element = 1;
+ args->element_size = sizeof(ndev->last_async_err);
+ if (copy_to_user(u64_to_user_ptr(args->buffer),
+ &ndev->last_async_err, args->element_size))
+ return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
new file mode 100644
index 000000000000..d493bb1c3360
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+#define DECLARE_AIE2_MSG(name, op) \
+ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
+
+#define EXEC_MSG_OPS(xdna) ((xdna)->dev_handle->exec_msg_ops)
+
+static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
+ struct xdna_mailbox_msg *msg)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ if (!ndev->mgmt_chann)
+ return -ENODEV;
+
+ drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock));
+ ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
+ if (ret == -ETIME) {
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ }
+
+ if (!ret && *hdl->status != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
+ msg->opcode, *hdl->data);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_RESUME);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
+{
+ DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ req.value = value;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
+{
+ DECLARE_AIE2_MSG(get_runtime_cfg, MSG_OP_GET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to get runtime config, ret %d", ret);
+ return ret;
+ }
+
+ *value = resp.value;
+ return 0;
+}
+
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
+{
+ DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);
+
+ req.pasid = pasid;
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version)
+{
+ DECLARE_AIE2_MSG(aie_version_info, MSG_OP_QUERY_AIE_VERSION);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "Query AIE version - major: %u minor: %u completed",
+ resp.major, resp.minor);
+
+ version->major = resp.major;
+ version->minor = resp.minor;
+
+ return 0;
+}
+
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata)
+{
+ DECLARE_AIE2_MSG(aie_tile_info, MSG_OP_QUERY_AIE_TILE_INFO);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ metadata->size = resp.info.size;
+ metadata->cols = resp.info.cols;
+ metadata->rows = resp.info.rows;
+
+ metadata->version.major = resp.info.major;
+ metadata->version.minor = resp.info.minor;
+
+ metadata->core.row_count = resp.info.core_rows;
+ metadata->core.row_start = resp.info.core_row_start;
+ metadata->core.dma_channel_count = resp.info.core_dma_channels;
+ metadata->core.lock_count = resp.info.core_locks;
+ metadata->core.event_reg_count = resp.info.core_events;
+
+ metadata->mem.row_count = resp.info.mem_rows;
+ metadata->mem.row_start = resp.info.mem_row_start;
+ metadata->mem.dma_channel_count = resp.info.mem_dma_channels;
+ metadata->mem.lock_count = resp.info.mem_locks;
+ metadata->mem.event_reg_count = resp.info.mem_events;
+
+ metadata->shim.row_count = resp.info.shim_rows;
+ metadata->shim.row_start = resp.info.shim_row_start;
+ metadata->shim.dma_channel_count = resp.info.shim_dma_channels;
+ metadata->shim.lock_count = resp.info.shim_locks;
+ metadata->shim.event_reg_count = resp.info.shim_events;
+
+ return 0;
+}
+
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver)
+{
+ DECLARE_AIE2_MSG(firmware_version, MSG_OP_GET_FIRMWARE_VERSION);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ fw_ver->major = resp.major;
+ fw_ver->minor = resp.minor;
+ fw_ver->sub = resp.sub;
+ fw_ver->build = resp.build;
+
+ return 0;
+}
+
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(create_ctx, MSG_OP_CREATE_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_mailbox_chann_res x2i;
+ struct xdna_mailbox_chann_res i2x;
+ struct cq_pair *cq_pair;
+ u32 intr_reg;
+ int ret;
+
+ req.aie_type = 1;
+ req.start_col = hwctx->start_col;
+ req.num_col = hwctx->num_col;
+ req.num_cq_pairs_requested = 1;
+ req.pasid = hwctx->client->pasid;
+ req.context_priority = 2;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ hwctx->fw_ctx_id = resp.context_id;
+ WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+
+ if (ndev->force_preempt_enabled) {
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FORCE_PREEMPT, &hwctx->fw_ctx_id);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable force preempt %d", ret);
+ return ret;
+ }
+ }
+
+ cq_pair = &resp.cq_pair[0];
+ x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
+ x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
+ x2i.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->x2i_q.buf_addr);
+ x2i.rb_size = cq_pair->x2i_q.buf_size;
+
+ i2x.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.head_addr);
+ i2x.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.tail_addr);
+ i2x.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->i2x_q.buf_addr);
+ i2x.rb_size = cq_pair->i2x_q.buf_size;
+
+ ret = pci_irq_vector(to_pci_dev(xdna->ddev.dev), resp.msix_id);
+ if (ret == -EINVAL) {
+ XDNA_ERR(xdna, "not able to create channel");
+ goto out_destroy_context;
+ }
+
+ intr_reg = i2x.mb_head_ptr_reg + 4;
+ hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
+ intr_reg, ret);
+ if (!hwctx->priv->mbox_chann) {
+ XDNA_ERR(xdna, "not able to create channel");
+ ret = -EINVAL;
+ goto out_destroy_context;
+ }
+ ndev->hwctx_num++;
+
+ XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
+ hwctx->name, ret, resp.msix_id);
+ XDNA_DBG(xdna, "%s created fw ctx %d pasid %d", hwctx->name,
+ hwctx->fw_ctx_id, hwctx->client->pasid);
+
+ return 0;
+
+out_destroy_context:
+ aie2_destroy_context(ndev, hwctx);
+ return ret;
+}
+
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(destroy_ctx, MSG_OP_DESTROY_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ if (hwctx->fw_ctx_id == -1)
+ return 0;
+
+ xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
+
+ req.context_id = hwctx->fw_ctx_id;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ XDNA_WARN(xdna, "%s destroy context failed, ret %d", hwctx->name, ret);
+
+ xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
+ XDNA_DBG(xdna, "%s destroyed fw ctx %d", hwctx->name,
+ hwctx->fw_ctx_id);
+ hwctx->priv->mbox_chann = NULL;
+ hwctx->fw_ctx_id = -1;
+ ndev->hwctx_num--;
+
+ return ret;
+}
+
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size)
+{
+ DECLARE_AIE2_MSG(map_host_buffer, MSG_OP_MAP_HOST_BUFFER);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ req.context_id = context_id;
+ req.buf_addr = addr;
+ req.buf_size = size;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "fw ctx %d map host buf addr 0x%llx size 0x%llx",
+ context_id, addr, size);
+
+ return 0;
+}
+
+static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ u32 *bitmap = arg;
+
+ *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
+
+ return 0;
+}
+
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
+ u32 size, u32 *cols_filled)
+{
+ DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct amdxdna_client *client;
+ dma_addr_t dma_addr;
+ u32 aie_bitmap = 0;
+ u8 *buff_addr;
+ int ret;
+
+ buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!buff_addr)
+ return -ENOMEM;
+
+ /* Go through each hardware context and mark the AIE columns that are active */
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
+
+ *cols_filled = 0;
+ req.dump_buff_addr = dma_addr;
+ req.dump_buff_size = size;
+ req.num_cols = hweight32(aie_bitmap);
+ req.aie_bitmap = aie_bitmap;
+
+ drm_clflush_virt_range(buff_addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Error during NPU query, status %d", ret);
+ goto fail;
+ }
+
+ XDNA_DBG(xdna, "Query NPU status completed");
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto fail;
+ }
+
+ if (copy_to_user(buf, buff_addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy NPU status to user space");
+ goto fail;
+ }
+
+ *cols_filled = aie_bitmap;
+
+fail:
+ dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
+int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev,
+ char __user *buf, u32 size,
+ struct amdxdna_drm_query_telemetry_header *header)
+{
+ DECLARE_AIE2_MSG(get_telemetry, MSG_OP_GET_TELEMETRY);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ dma_addr_t dma_addr;
+ u8 *addr;
+ int ret;
+
+ if (header->type >= MAX_TELEMETRY_TYPE)
+ return -EINVAL;
+
+ addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ req.buf_addr = dma_addr;
+ req.buf_size = size;
+ req.type = header->type;
+
+ drm_clflush_virt_range(addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Query telemetry failed, status %d", ret);
+ goto free_buf;
+ }
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto free_buf;
+ }
+
+ if (copy_to_user(buf, addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy telemetry to user space");
+ goto free_buf;
+ }
+
+ header->major = resp.major;
+ header->minor = resp.minor;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, size, addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, void __iomem *, size_t))
+{
+ struct async_event_msg_req req = { 0 };
+ struct xdna_mailbox_msg msg = {
+ .send_data = (u8 *)&req,
+ .send_size = sizeof(req),
+ .handle = handle,
+ .opcode = MSG_OP_REGISTER_ASYNC_EVENT_MSG,
+ .notify_cb = cb,
+ };
+
+ req.buf_addr = addr;
+ req.buf_size = size;
+
+ XDNA_DBG(ndev->xdna, "Register addr 0x%llx size 0x%x", addr, size);
+ return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
+}
+
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 shift = xdna->dev_info->dev_mem_buf_shift;
+ struct config_cu_req req = { 0 };
+ struct xdna_mailbox_msg msg;
+ struct drm_gem_object *gobj;
+ struct amdxdna_gem_obj *abo;
+ int i;
+
+ if (!chann)
+ return -ENODEV;
+
+ if (hwctx->cus->num_cus > MAX_NUM_CUS) {
+ XDNA_DBG(xdna, "Exceed maximum CU %d", MAX_NUM_CUS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwctx->cus->num_cus; i++) {
+ struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];
+
+ if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -EINVAL;
+ }
+ abo = to_xdna_obj(gobj);
+
+ if (abo->type != AMDXDNA_BO_DEV) {
+ drm_gem_object_put(gobj);
+ XDNA_ERR(xdna, "Invalid BO type");
+ return -EINVAL;
+ }
+
+ req.cfgs[i] = FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR,
+ abo->mem.dev_addr >> shift);
+ req.cfgs[i] |= FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, cu->cu_func);
+ XDNA_DBG(xdna, "CU %d full addr 0x%llx, cfg 0x%x", i,
+ abo->mem.dev_addr, req.cfgs[i]);
+ drm_gem_object_put(gobj);
+ }
+ req.num_cus = hwctx->cus->num_cus;
+
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.handle = hwctx;
+ msg.opcode = MSG_OP_CONFIG_CU;
+ msg.notify_cb = notify_cb;
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
+
+static int aie2_init_exec_cu_req(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op)
+{
+ struct execute_buffer_req *cu_req = req;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (cmd_len > sizeof(cu_req->payload))
+ return -EINVAL;
+
+ cu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (cu_req->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ memcpy(cu_req->payload, cmd, cmd_len);
+
+ *size = sizeof(*cu_req);
+ *msg_op = MSG_OP_EXECUTE_BUFFER_CF;
+ return 0;
+}
+
+static int aie2_init_exec_dpu_req(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op)
+{
+ struct exec_dpu_req *dpu_req = req;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (cmd_len - sizeof(*sn) > sizeof(dpu_req->payload))
+ return -EINVAL;
+
+ dpu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (dpu_req->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ dpu_req->inst_buf_addr = sn->buffer;
+ dpu_req->inst_size = sn->buffer_size;
+ dpu_req->inst_prop_cnt = sn->prop_count;
+ memcpy(dpu_req->payload, sn->prop_args, cmd_len - sizeof(*sn));
+
+ *size = sizeof(*dpu_req);
+ *msg_op = MSG_OP_EXEC_DPU;
+ return 0;
+}
+
+static void aie2_init_exec_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt)
+{
+ struct cmd_chain_req *chain_req = req;
+
+ chain_req->buf_addr = slot_addr;
+ chain_req->buf_size = size;
+ chain_req->count = cmd_cnt;
+}
+
+static void aie2_init_npu_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt)
+{
+ struct cmd_chain_npu_req *npu_chain_req = req;
+
+ npu_chain_req->flags = 0;
+ npu_chain_req->reserved = 0;
+ npu_chain_req->buf_addr = slot_addr;
+ npu_chain_req->buf_size = size;
+ npu_chain_req->count = cmd_cnt;
+}
+
+static int
+aie2_cmdlist_fill_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_execbuf_cf *cf_slot = slot;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (*size < sizeof(*cf_slot) + cmd_len)
+ return -EINVAL;
+
+ cf_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (cf_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ cf_slot->arg_cnt = cmd_len / sizeof(u32);
+ memcpy(cf_slot->args, cmd, cmd_len);
+ /* Report the exact slot size so firmware copies only what it needs */
+ *size = sizeof(*cf_slot) + cmd_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_dpu *dpu_slot = slot;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+ if (cmd_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (*size < sizeof(*dpu_slot) + arg_sz)
+ return -EINVAL;
+
+ dpu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (dpu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ dpu_slot->inst_buf_addr = sn->buffer;
+ dpu_slot->inst_size = sn->buffer_size;
+ dpu_slot->inst_prop_cnt = sn->prop_count;
+ dpu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(dpu_slot->args, sn->prop_args, arg_sz);
+
+ /* Report the exact slot size so firmware copies only what it needs */
+ *size = sizeof(*dpu_slot) + arg_sz;
+ return 0;
+}
+
+static int aie2_cmdlist_unsupp(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 aie2_get_chain_msg_op(u32 cmd_op)
+{
+ switch (cmd_op) {
+ case ERT_START_CU:
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
+ break;
+ }
+
+ return MSG_OP_MAX_OPCODE;
+}
+
+static struct aie2_exec_msg_ops legacy_exec_message_ops = {
+ .init_cu_req = aie2_init_exec_cu_req,
+ .init_dpu_req = aie2_init_exec_dpu_req,
+ .init_chain_req = aie2_init_exec_chain_req,
+ .fill_cf_slot = aie2_cmdlist_fill_cf,
+ .fill_dpu_slot = aie2_cmdlist_fill_dpu,
+ .fill_preempt_slot = aie2_cmdlist_unsupp,
+ .fill_elf_slot = aie2_cmdlist_unsupp,
+ .get_chain_msg_op = aie2_get_chain_msg_op,
+};
+
+static int
+aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (*size < sizeof(*npu_slot) + cmd_len)
+ return -EINVAL;
+
+ /* Zero the slot before assigning cu_idx, so the memset does not wipe it */
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ npu_slot->type = EXEC_NPU_TYPE_NON_ELF;
+ npu_slot->arg_cnt = cmd_len / sizeof(u32);
+ memcpy(npu_slot->args, cmd, cmd_len);
+
+ *size = sizeof(*npu_slot) + cmd_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+ if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
+
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ npu_slot->type = EXEC_NPU_TYPE_PARTIAL_ELF;
+ npu_slot->inst_buf_addr = sn->buffer;
+ npu_slot->inst_size = sn->buffer_size;
+ npu_slot->inst_prop_cnt = sn->prop_count;
+ npu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(npu_slot->args, sn->prop_args, arg_sz);
+
+ *size = sizeof(*npu_slot) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_preempt_data *pd;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+ if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
+
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ npu_slot->type = EXEC_NPU_TYPE_PREEMPT;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+ npu_slot->restore_buf_addr = pd->restore_buf;
+ npu_slot->inst_size = pd->inst_size;
+ npu_slot->save_size = pd->save_size;
+ npu_slot->restore_size = pd->restore_size;
+ npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
+ npu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(npu_slot->args, pd->prop_args, arg_sz);
+
+ *size = sizeof(*npu_slot) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_preempt_data *pd;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+ if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
+
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->type = EXEC_NPU_TYPE_ELF;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+ npu_slot->restore_buf_addr = pd->restore_buf;
+ npu_slot->inst_size = pd->inst_size;
+ npu_slot->save_size = pd->save_size;
+ npu_slot->restore_size = pd->restore_size;
+ npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
+ npu_slot->arg_cnt = 1;
+ npu_slot->args[0] = AIE2_EXEC_BUFFER_KERNEL_OP_TXN;
+
+ *size = struct_size(npu_slot, args, npu_slot->arg_cnt);
+ return 0;
+}
+
+static u32 aie2_get_npu_chain_msg_op(u32 cmd_op)
+{
+ return MSG_OP_CHAIN_EXEC_NPU;
+}
+
+static struct aie2_exec_msg_ops npu_exec_message_ops = {
+ .init_cu_req = aie2_init_exec_cu_req,
+ .init_dpu_req = aie2_init_exec_dpu_req,
+ .init_chain_req = aie2_init_npu_chain_req,
+ .fill_cf_slot = aie2_cmdlist_fill_npu_cf,
+ .fill_dpu_slot = aie2_cmdlist_fill_npu_dpu,
+ .fill_preempt_slot = aie2_cmdlist_fill_npu_preempt,
+ .fill_elf_slot = aie2_cmdlist_fill_npu_elf,
+ .get_chain_msg_op = aie2_get_npu_chain_msg_op,
+};
+
+static int aie2_init_exec_req(void *req, struct amdxdna_gem_obj *cmd_abo,
+ size_t *size, u32 *msg_op)
+{
+ struct amdxdna_dev *xdna = cmd_abo->client->xdna;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ switch (op) {
+ case ERT_START_CU:
+ ret = EXEC_MSG_OPS(xdna)->init_cu_req(cmd_abo, req, size, msg_op);
+ if (ret) {
+ XDNA_DBG(xdna, "Init CU req failed ret %d", ret);
+ return ret;
+ }
+ break;
+ case ERT_START_NPU:
+ ret = EXEC_MSG_OPS(xdna)->init_dpu_req(cmd_abo, req, size, msg_op);
+ if (ret) {
+ XDNA_DBG(xdna, "Init DPU req failed ret %d", ret);
+ return ret;
+ }
+		break;
+ default:
+ XDNA_ERR(xdna, "Unsupported op %d", op);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+aie2_cmdlist_fill_slot(void *slot, struct amdxdna_gem_obj *cmd_abo,
+ size_t *size, u32 *cmd_op)
+{
+ struct amdxdna_dev *xdna = cmd_abo->client->xdna;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
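+	/* All commands in one chain must carry the same op */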
+ if (*cmd_op == ERT_INVALID_CMD)
+ *cmd_op = op;
+ else if (op != *cmd_op)
+ return -EINVAL;
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = EXEC_MSG_OPS(xdna)->fill_cf_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU:
+ ret = EXEC_MSG_OPS(xdna)->fill_dpu_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU_PREEMPT:
+ if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
+ return -EOPNOTSUPP;
+ ret = EXEC_MSG_OPS(xdna)->fill_preempt_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU_PREEMPT_ELF:
+ if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
+ return -EOPNOTSUPP;
+ ret = EXEC_MSG_OPS(xdna)->fill_elf_slot(cmd_abo, slot, size);
+ break;
+ default:
+ XDNA_INFO(xdna, "Unsupported op %d", op);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+void aie2_msg_init(struct amdxdna_dev_hdl *ndev)
+{
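+	/* Select the exec message format based on the firmware's features */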
+ if (AIE2_FEATURE_ON(ndev, AIE2_NPU_COMMAND))
+ ndev->exec_msg_ops = &npu_exec_message_ops;
+ else
+ ndev->exec_msg_ops = &legacy_exec_message_ops;
+}
+
+static inline struct amdxdna_gem_obj *
+aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
+{
+ int idx = get_job_idx(job->seq);
+
+ return job->hwctx->priv->cmd_buf[idx];
+}
+
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ union exec_req req;
+ int ret;
+
+ if (!chann)
+ return -ENODEV;
+
+ ret = aie2_init_exec_req(&req, cmd_abo, &msg.send_size, &msg.opcode);
+ if (ret)
+ return ret;
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_cmd_chain *payload;
+ struct xdna_mailbox_msg msg;
+ union exec_chain_req req;
+ u32 payload_len;
+ u32 offset = 0;
+ size_t size;
+ int ret;
+ u32 op;
+ u32 i;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (op != ERT_CMD_CHAIN || !payload ||
+ payload_len < struct_size(payload, data, payload->command_count))
+ return -EINVAL;
+
+ op = ERT_INVALID_CMD;
+ for (i = 0; i < payload->command_count; i++) {
+ u32 boh = (u32)(payload->data[i]);
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
+ if (!abo) {
+ XDNA_ERR(xdna, "Failed to find cmd BO %d", boh);
+ return -ENOENT;
+ }
+
+ size = cmdbuf_abo->mem.size - offset;
+ ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva + offset,
+ abo, &size, &op);
+ amdxdna_gem_put_obj(abo);
+ if (ret)
+ return ret;
+
+ offset += size;
+ }
+ msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+
+ /* The offset is the accumulated total size of the cmd buffer */
+ EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr,
+ offset, payload->command_count);
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, offset);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ union exec_chain_req req;
+ u32 op = ERT_INVALID_CMD;
+ size_t size;
+ int ret;
+
+ size = cmdbuf_abo->mem.size;
+ ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva, cmd_abo, &size, &op);
+ if (ret)
+ return ret;
+
+ msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+
+ EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr,
+ size, 1);
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct xdna_mailbox_msg msg;
+ struct sync_bo_req req;
+ int ret = 0;
+
+ req.src_addr = 0;
+ req.dst_addr = amdxdna_dev_bo_offset(abo);
+ req.size = abo->mem.size;
+
+ /* Device to Host */
+ req.type = FIELD_PREP(AIE2_MSG_SYNC_BO_SRC_TYPE, SYNC_BO_DEV_MEM) |
+ FIELD_PREP(AIE2_MSG_SYNC_BO_DST_TYPE, SYNC_BO_HOST_MEM);
+
+ XDNA_DBG(xdna, "sync %d bytes src(0x%llx) to dst(0x%llx) completed",
+ req.size, req.src_addr, req.dst_addr);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_SYNC_BO;
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct config_debug_bo_req req;
+ struct xdna_mailbox_msg msg;
+
+ if (job->drv_cmd->opcode == ATTACH_DEBUG_BO)
+ req.config = DEBUG_BO_REGISTER;
+ else
+ req.config = DEBUG_BO_UNREGISTER;
+
+ req.offset = amdxdna_dev_bo_offset(abo);
+ req.size = abo->mem.size;
+
+ XDNA_DBG(xdna, "offset 0x%llx size 0x%llx config %d",
+ req.offset, req.size, req.config);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_CONFIG_DEBUG_BO;
+
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
new file mode 100644
index 000000000000..1c957a6298d3
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MSG_PRIV_H_
+#define _AIE2_MSG_PRIV_H_
+
+enum aie2_msg_opcode {
+ MSG_OP_CREATE_CONTEXT = 0x2,
+ MSG_OP_DESTROY_CONTEXT = 0x3,
+ MSG_OP_GET_TELEMETRY = 0x4,
+ MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_EXECUTE_BUFFER_CF = 0xC,
+ MSG_OP_QUERY_COL_STATUS = 0xD,
+ MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
+ MSG_OP_QUERY_AIE_VERSION = 0xF,
+ MSG_OP_EXEC_DPU = 0x10,
+ MSG_OP_CONFIG_CU = 0x11,
+ MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
+ MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_CONFIG_DEBUG_BO = 0x14,
+ MSG_OP_CHAIN_EXEC_NPU = 0x18,
+ MSG_OP_MAX_XRT_OPCODE,
+ MSG_OP_SUSPEND = 0x101,
+ MSG_OP_RESUME = 0x102,
+ MSG_OP_ASSIGN_MGMT_PASID = 0x103,
+ MSG_OP_INVOKE_SELF_TEST = 0x104,
+ MSG_OP_MAP_HOST_BUFFER = 0x106,
+ MSG_OP_GET_FIRMWARE_VERSION = 0x108,
+ MSG_OP_SET_RUNTIME_CONFIG = 0x10A,
+ MSG_OP_GET_RUNTIME_CONFIG = 0x10B,
+ MSG_OP_REGISTER_ASYNC_EVENT_MSG = 0x10C,
+ MSG_OP_MAX_DRV_OPCODE,
+ MSG_OP_GET_PROTOCOL_VERSION = 0x301,
+ MSG_OP_MAX_OPCODE
+};
+
+enum aie2_msg_status {
+ AIE2_STATUS_SUCCESS = 0x0,
+ /* AIE Error codes */
+ AIE2_STATUS_AIE_SATURATION_ERROR = 0x1000001,
+ AIE2_STATUS_AIE_FP_ERROR = 0x1000002,
+ AIE2_STATUS_AIE_STREAM_ERROR = 0x1000003,
+ AIE2_STATUS_AIE_ACCESS_ERROR = 0x1000004,
+ AIE2_STATUS_AIE_BUS_ERROR = 0x1000005,
+ AIE2_STATUS_AIE_INSTRUCTION_ERROR = 0x1000006,
+ AIE2_STATUS_AIE_ECC_ERROR = 0x1000007,
+ AIE2_STATUS_AIE_LOCK_ERROR = 0x1000008,
+ AIE2_STATUS_AIE_DMA_ERROR = 0x1000009,
+ AIE2_STATUS_AIE_MEM_PARITY_ERROR = 0x100000a,
+ AIE2_STATUS_AIE_PWR_CFG_ERROR = 0x100000b,
+ AIE2_STATUS_AIE_BACKTRACK_ERROR = 0x100000c,
+ AIE2_STATUS_MAX_AIE_STATUS_CODE,
+ /* MGMT ERT Error codes */
+ AIE2_STATUS_MGMT_ERT_SELF_TEST_FAILURE = 0x2000001,
+ AIE2_STATUS_MGMT_ERT_HASH_MISMATCH,
+ AIE2_STATUS_MGMT_ERT_NOAVAIL,
+ AIE2_STATUS_MGMT_ERT_INVALID_PARAM,
+ AIE2_STATUS_MGMT_ERT_ENTER_SUSPEND_FAILURE,
+ AIE2_STATUS_MGMT_ERT_BUSY,
+ AIE2_STATUS_MGMT_ERT_APPLICATION_ACTIVE,
+ MAX_MGMT_ERT_STATUS_CODE,
+ /* APP ERT Error codes */
+ AIE2_STATUS_APP_ERT_FIRST_ERROR = 0x3000001,
+ AIE2_STATUS_APP_INVALID_INSTR,
+ AIE2_STATUS_APP_LOAD_PDI_FAIL,
+ MAX_APP_ERT_STATUS_CODE,
+ /* NPU RTOS Error Codes */
+ AIE2_STATUS_INVALID_INPUT_BUFFER = 0x4000001,
+ AIE2_STATUS_INVALID_COMMAND,
+ AIE2_STATUS_INVALID_PARAM,
+ AIE2_STATUS_INVALID_OPERATION = 0x4000006,
+ AIE2_STATUS_ASYNC_EVENT_MSGS_FULL,
+ AIE2_STATUS_MAX_RTOS_STATUS_CODE,
+ MAX_AIE2_STATUS_CODE
+};
+
+struct assign_mgmt_pasid_req {
+ __u16 pasid;
+ __u16 reserved;
+} __packed;
+
+struct assign_mgmt_pasid_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct map_host_buffer_req {
+ __u32 context_id;
+ __u64 buf_addr;
+ __u64 buf_size;
+} __packed;
+
+struct map_host_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+#define MAX_CQ_PAIRS 2
+struct cq_info {
+ __u32 head_addr;
+ __u32 tail_addr;
+ __u32 buf_addr;
+ __u32 buf_size;
+};
+
+struct cq_pair {
+ struct cq_info x2i_q;
+ struct cq_info i2x_q;
+};
+
+struct create_ctx_req {
+ __u32 aie_type;
+ __u8 start_col;
+ __u8 num_col;
+ __u16 reserved;
+ __u8 num_cq_pairs_requested;
+ __u8 reserved1;
+ __u16 pasid;
+ __u32 pad[2];
+ __u32 sec_comm_target_type;
+ __u32 context_priority;
+} __packed;
+
+struct create_ctx_resp {
+ enum aie2_msg_status status;
+ __u32 context_id;
+ __u16 msix_id;
+ __u8 num_cq_pairs_allocated;
+ __u8 reserved;
+ struct cq_pair cq_pair[MAX_CQ_PAIRS];
+} __packed;
+
+struct destroy_ctx_req {
+ __u32 context_id;
+} __packed;
+
+struct destroy_ctx_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+enum telemetry_type {
+ TELEMETRY_TYPE_DISABLED,
+ TELEMETRY_TYPE_HEALTH,
+ TELEMETRY_TYPE_ERROR_INFO,
+ TELEMETRY_TYPE_PROFILING,
+ TELEMETRY_TYPE_DEBUG,
+ MAX_TELEMETRY_TYPE
+};
+
+struct get_telemetry_req {
+ enum telemetry_type type;
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct get_telemetry_resp {
+ __u32 major;
+ __u32 minor;
+ __u32 size;
+ enum aie2_msg_status status;
+} __packed;
+
+struct execute_buffer_req {
+ __u32 cu_idx;
+ __u32 payload[19];
+} __packed;
+
+struct exec_dpu_req {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 payload[35];
+} __packed;
+
+enum exec_npu_type {
+ EXEC_NPU_TYPE_NON_ELF = 0x1,
+ EXEC_NPU_TYPE_PARTIAL_ELF = 0x2,
+ EXEC_NPU_TYPE_PREEMPT = 0x3,
+ EXEC_NPU_TYPE_ELF = 0x4,
+};
+
+union exec_req {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu_req;
+};
+
+struct execute_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct aie_tile_info {
+ __u32 size;
+ __u16 major;
+ __u16 minor;
+ __u16 cols;
+ __u16 rows;
+ __u16 core_rows;
+ __u16 mem_rows;
+ __u16 shim_rows;
+ __u16 core_row_start;
+ __u16 mem_row_start;
+ __u16 shim_row_start;
+ __u16 core_dma_channels;
+ __u16 mem_dma_channels;
+ __u16 shim_dma_channels;
+ __u16 core_locks;
+ __u16 mem_locks;
+ __u16 shim_locks;
+ __u16 core_events;
+ __u16 mem_events;
+ __u16 shim_events;
+ __u16 reserved;
+};
+
+struct aie_tile_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_tile_info_resp {
+ enum aie2_msg_status status;
+ struct aie_tile_info info;
+} __packed;
+
+struct aie_version_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_version_info_resp {
+ enum aie2_msg_status status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct aie_column_info_req {
+ __u64 dump_buff_addr;
+ __u32 dump_buff_size;
+ __u32 num_cols;
+ __u32 aie_bitmap;
+} __packed;
+
+struct aie_column_info_resp {
+ enum aie2_msg_status status;
+ __u32 size;
+} __packed;
+
+struct suspend_req {
+ __u32 place_holder;
+} __packed;
+
+struct suspend_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct resume_req {
+ __u32 place_holder;
+} __packed;
+
+struct resume_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct check_header_hash_req {
+ __u64 hash_high;
+ __u64 hash_low;
+} __packed;
+
+struct check_header_hash_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct query_error_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct query_error_resp {
+ enum aie2_msg_status status;
+ __u32 num_err;
+ __u32 has_next_err;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct protocol_version_req {
+ __u32 reserved;
+} __packed;
+
+struct protocol_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+} __packed;
+
+struct firmware_version_req {
+ __u32 reserved;
+} __packed;
+
+struct firmware_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+ __u32 sub;
+ __u32 build;
+} __packed;
+
+#define MAX_NUM_CUS 32
+#define AIE2_MSG_CFG_CU_PDI_ADDR GENMASK(16, 0)
+#define AIE2_MSG_CFG_CU_FUNC GENMASK(24, 17)
+struct config_cu_req {
+ __u32 num_cus;
+ __u32 cfgs[MAX_NUM_CUS];
+} __packed;
+
+struct config_cu_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct set_runtime_cfg_req {
+ __u32 type;
+ __u64 value;
+} __packed;
+
+struct set_runtime_cfg_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct get_runtime_cfg_req {
+ __u32 type;
+} __packed;
+
+struct get_runtime_cfg_resp {
+ enum aie2_msg_status status;
+ __u64 value;
+} __packed;
+
+enum async_event_type {
+ ASYNC_EVENT_TYPE_AIE_ERROR,
+ ASYNC_EVENT_TYPE_EXCEPTION,
+ MAX_ASYNC_EVENT_TYPE
+};
+
+#define ASYNC_BUF_SIZE SZ_8K
+struct async_event_msg_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct async_event_msg_resp {
+ enum aie2_msg_status status;
+ enum async_event_type type;
+} __packed;
+
+#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
+
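+/*
+ * Slots in a chain command buffer are packed back to back. Each slot
+ * takes struct_size(slot, args, arg_cnt) bytes, so the next slot starts
+ * right after the previous slot's args[] array.
+ */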
+struct cmd_chain_slot_execbuf_cf {
+ __u32 cu_idx;
+ __u32 arg_cnt;
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+struct cmd_chain_slot_dpu {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 arg_cnt;
+#define MAX_DPU_ARGS_SIZE (34 * sizeof(__u32))
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+#define MAX_NPU_ARGS_SIZE (26 * sizeof(__u32))
+#define AIE2_EXEC_BUFFER_KERNEL_OP_TXN 3
+struct cmd_chain_slot_npu {
+ enum exec_npu_type type;
+ u64 inst_buf_addr;
+ u64 save_buf_addr;
+ u64 restore_buf_addr;
+ u32 inst_size;
+ u32 save_size;
+ u32 restore_size;
+ u32 inst_prop_cnt;
+ u32 cu_idx;
+ u32 arg_cnt;
+ u32 args[] __counted_by(arg_cnt);
+} __packed;
+
+struct cmd_chain_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 count;
+} __packed;
+
+struct cmd_chain_npu_req {
+ u32 flags;
+ u32 reserved;
+ u64 buf_addr;
+ u32 buf_size;
+ u32 count;
+} __packed;
+
+union exec_chain_req {
+ struct cmd_chain_npu_req npu_req;
+ struct cmd_chain_req req;
+};
+
+struct cmd_chain_resp {
+ enum aie2_msg_status status;
+ __u32 fail_cmd_idx;
+ enum aie2_msg_status fail_cmd_status;
+} __packed;
+
+#define AIE2_MSG_SYNC_BO_SRC_TYPE GENMASK(3, 0)
+#define AIE2_MSG_SYNC_BO_DST_TYPE GENMASK(7, 4)
+struct sync_bo_req {
+ __u64 src_addr;
+ __u64 dst_addr;
+ __u32 size;
+#define SYNC_BO_DEV_MEM 0
+#define SYNC_BO_HOST_MEM 2
+ __u32 type;
+} __packed;
+
+struct sync_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+#define DEBUG_BO_UNREGISTER 0
+#define DEBUG_BO_REGISTER 1
+struct config_debug_bo_req {
+ __u64 offset;
+ __u64 size;
+ /*
+ * config operations.
+ * DEBUG_BO_REGISTER: Register debug buffer
+ * DEBUG_BO_UNREGISTER: Unregister debug buffer
+ */
+ __u32 config;
+} __packed;
+
+struct config_debug_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
+#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
new file mode 100644
index 000000000000..ceef1c502e9e
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -0,0 +1,1187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
+
+static int aie2_max_col = XRS_MAX_COL;
+module_param(aie2_max_col, uint, 0600);
+MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");
+
+/*
+ * The management mailbox channel is allocated by the firmware.
+ * The related register and ring buffer information is in the SRAM BAR.
+ * This struct describes the register layout.
+ */
+#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
+struct mgmt_mbox_chann_info {
+ __u32 x2i_tail;
+ __u32 x2i_head;
+ __u32 x2i_buf;
+ __u32 x2i_buf_sz;
+ __u32 i2x_tail;
+ __u32 i2x_head;
+ __u32 i2x_buf;
+ __u32 i2x_buf_sz;
+ __u32 magic;
+ __u32 msi_id;
+ __u32 prot_major;
+ __u32 prot_minor;
+ __u32 rsvd[4];
+};
+
+static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
+{
+ const struct aie2_fw_feature_tbl *feature;
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ /*
+	 * The mailbox behavior supported by the driver is defined by
+	 * ndev->priv->protocol_major and protocol_minor.
+	 *
+	 * When protocol_major and fw_major differ, the driver and the
+	 * firmware are incompatible.
+ */
+ if (ndev->priv->protocol_major != fw_major) {
+ XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
+ fw_major, fw_minor);
+ return -EINVAL;
+ }
+
+ /*
+	 * When protocol_minor is greater than fw_minor, the driver relies
+	 * on operations that the installed firmware does not support.
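+	 *
+	 * For example (version numbers are illustrative), a driver built
+	 * for protocol 6.4 accepts firmware reporting 6.4 or 6.5, but
+	 * rejects 6.3 and any firmware with a different major version.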
+ */
+ if (ndev->priv->protocol_minor > fw_minor) {
+ XDNA_ERR(xdna, "Firmware minor version smaller than supported");
+ return -EINVAL;
+ }
+
+ for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor;
+ feature++) {
+ if (fw_minor < feature->min_minor)
+ continue;
+ if (feature->max_minor > 0 && fw_minor > feature->max_minor)
+ continue;
+
+ set_bit(feature->feature, &ndev->feature_mask);
+ }
+
+ return 0;
+}
+
+static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
+ XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
+ XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
+ XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
+ XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
+ XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
+ XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
+}
+
+static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
+{
+ struct mgmt_mbox_chann_info info_regs;
+ struct xdna_mailbox_chann_res *i2x;
+ struct xdna_mailbox_chann_res *x2i;
+ u32 addr, off;
+ u32 *reg;
+ int ret;
+ int i;
+
+ /*
+	 * Once the firmware is alive, it writes the management channel
+	 * information into the SRAM BAR and stores the address of that
+	 * information at the FW_ALIVE_OFF offset in the SRAM BAR.
+	 *
+	 * Reading a non-zero value from FW_ALIVE_OFF therefore implies
+	 * that the firmware is alive.
+ */
+ ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
+ addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret || !addr)
+ return -ETIME;
+
+ off = AIE2_SRAM_OFF(ndev, addr);
+ reg = (u32 *)&info_regs;
+ for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
+ reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
+
+ if (info_regs.magic != MGMT_MBOX_MAGIC) {
+ XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ i2x = &ndev->mgmt_i2x;
+ x2i = &ndev->mgmt_x2i;
+
+ i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
+ i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
+ i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
+ i2x->rb_size = info_regs.i2x_buf_sz;
+
+ x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
+ x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
+ x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
+ x2i->rb_size = info_regs.x2i_buf_sz;
+
+ ndev->mgmt_chan_idx = info_regs.msi_id;
+ ndev->mgmt_prot_major = info_regs.prot_major;
+ ndev->mgmt_prot_minor = info_regs.prot_minor;
+
+ ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
+
+done:
+ aie2_dump_chann_info_debug(ndev);
+
+ /* Must clear address at FW_ALIVE_OFF */
+ writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
+
+ return ret;
+}
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val)
+{
+ const struct rt_config *cfg;
+ u32 value;
+ int ret;
+
+ for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
+ if (cfg->category != category)
+ continue;
+
+ if (cfg->feature_mask &&
+ bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
+ continue;
+
+ value = val ? *val : cfg->value;
+ ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
+ cfg->type, value);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_suspend_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Suspend firmware failed");
+ return ret;
+ }
+
+ ret = aie2_resume_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Resume firmware failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Runtime config failed");
+ return ret;
+ }
+
+ ret = aie2_assign_mgmt_pasid(ndev, 0);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Can not assign PASID");
+ return ret;
+ }
+
+ ret = aie2_xdna_reset(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Reset firmware failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "query firmware version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_version(ndev, &ndev->version);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
+ return ret;
+ }
+
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
+ return 0;
+}
+
+static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
+{
+ if (aie2_suspend_fw(ndev))
+ XDNA_ERR(ndev->xdna, "Suspend_fw failed");
+ XDNA_DBG(ndev->xdna, "Firmware suspended");
+}
+
+static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ hwctx->start_col = action->part.start_col;
+ hwctx->num_col = action->part.ncols;
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "create context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_unload(void *cb_arg)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ ret = aie2_destroy_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_dev_hdl *ndev;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ ndev = xdna->dev_handle;
+ ndev->dft_dpm_level = dpm_level;
+ if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
+ return 0;
+
+ return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+}
+
+static struct xrs_action_ops aie2_xrs_actions = {
+ .load = aie2_xrs_load,
+ .unload = aie2_xrs_unload,
+ .set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
+};
+
+static void aie2_hw_stop(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ if (ndev->dev_status <= AIE2_DEV_INIT) {
+ XDNA_ERR(xdna, "device is already stopped");
+ return;
+ }
+
+ aie2_mgmt_fw_fini(ndev);
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
+ aie2_psp_stop(ndev->psp_hdl);
+ aie2_smu_fini(ndev);
+ aie2_error_async_events_free(ndev);
+ pci_disable_device(pdev);
+
+ ndev->dev_status = AIE2_DEV_INIT;
+}
+
+static int aie2_hw_start(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+ struct xdna_mailbox_res mbox_res;
+ u32 xdna_mailbox_intr_reg;
+ int mgmt_mb_irq, ret;
+
+ if (ndev->dev_status >= AIE2_DEV_START) {
+ XDNA_INFO(xdna, "device is already started");
+ return 0;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ ret = aie2_smu_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
+ goto disable_dev;
+ }
+
+ ret = aie2_psp_start(ndev->psp_hdl);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
+ goto fini_smu;
+ }
+
+ ret = aie2_get_mgmt_chann_info(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "firmware is not alive");
+ goto stop_psp;
+ }
+
+ mbox_res.ringbuf_base = ndev->sram_base;
+ mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
+ mbox_res.mbox_base = ndev->mbox_base;
+ mbox_res.mbox_size = MBOX_SIZE(ndev);
+ mbox_res.name = "xdna_mailbox";
+ ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
+ if (!ndev->mbox) {
+ XDNA_ERR(xdna, "failed to create mailbox device");
+ ret = -ENODEV;
+ goto stop_psp;
+ }
+
+ mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
+ if (mgmt_mb_irq < 0) {
+ ret = mgmt_mb_irq;
+ XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
+ goto stop_psp;
+ }
+
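+	/* The channel interrupt register follows the i2x head pointer register */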
+ xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
+ ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
+ &ndev->mgmt_x2i,
+ &ndev->mgmt_i2x,
+ xdna_mailbox_intr_reg,
+ mgmt_mb_irq);
+ if (!ndev->mgmt_chann) {
+ XDNA_ERR(xdna, "failed to create management mailbox channel");
+ ret = -EINVAL;
+ goto stop_psp;
+ }
+
+ ret = aie2_pm_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_mgmt_fw_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ndev->dev_status = AIE2_DEV_START;
+
+ return 0;
+
+destroy_mgmt_chann:
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+stop_psp:
+ aie2_psp_stop(ndev->psp_hdl);
+fini_smu:
+ aie2_smu_fini(ndev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+
+ guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+ aie2_hw_stop(xdna);
+
+ return 0;
+}
+
+static int aie2_hw_resume(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Start hardware failed, %d", ret);
+ return ret;
+ }
+
+ list_for_each_entry(client, &xdna->client_list, node) {
+ ret = aie2_hwctx_resume(client);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int aie2_init(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
+ struct init_config xrs_cfg = { 0 };
+ struct amdxdna_dev_hdl *ndev;
+ struct psp_config psp_conf;
+ const struct firmware *fw;
+ unsigned long bars = 0;
+ int i, nvec, ret;
+
+ ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->priv = xdna->dev_info->dev_priv;
+ ndev->xdna = xdna;
+
+ ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
+ ndev->priv->fw_path, ret);
+ return ret;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
+ goto release_fw;
+ }
+
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ set_bit(PSP_REG_BAR(ndev, i), &bars);
+
+ set_bit(xdna->dev_info->sram_bar, &bars);
+ set_bit(xdna->dev_info->smu_bar, &bars);
+ set_bit(xdna->dev_info->mbox_bar, &bars);
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!test_bit(i, &bars))
+ continue;
+ tbl[i] = pcim_iomap(pdev, i, 0);
+ if (!tbl[i]) {
+ XDNA_ERR(xdna, "map bar %d failed", i);
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ }
+
+ ndev->sram_base = tbl[xdna->dev_info->sram_bar];
+ ndev->smu_base = tbl[xdna->dev_info->smu_bar];
+ ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
+ goto release_fw;
+ }
+
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+ XDNA_ERR(xdna, "does not get number of interrupt vector");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
+ goto release_fw;
+ }
+
+ psp_conf.fw_size = fw->size;
+ psp_conf.fw_buf = fw->data;
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
+ ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
+ if (!ndev->psp_hdl) {
+ XDNA_ERR(xdna, "failed to create psp");
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ xdna->dev_handle = ndev;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "start npu failed, ret %d", ret);
+ goto release_fw;
+ }
+
+ xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
+ for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
+ xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
+ xrs_cfg.sys_eff_factor = 1;
+ xrs_cfg.ddev = &xdna->ddev;
+ xrs_cfg.actions = &aie2_xrs_actions;
+ xrs_cfg.total_col = ndev->total_col;
+
+ xdna->xrs_hdl = xrsm_init(&xrs_cfg);
+ if (!xdna->xrs_hdl) {
+ XDNA_ERR(xdna, "Initialize resolver failed");
+ ret = -EINVAL;
+ goto stop_hw;
+ }
+
+ release_firmware(fw);
+ aie2_msg_init(ndev);
+ amdxdna_pm_init(xdna);
+ return 0;
+
+stop_hw:
+ aie2_hw_stop(xdna);
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void aie2_fini(struct amdxdna_dev *xdna)
+{
+ amdxdna_pm_fini(xdna);
+ aie2_hw_stop(xdna);
+}
+
+static int aie2_get_aie_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_status status;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret;
+
+ ndev = xdna->dev_handle;
+ if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
+ return -EFAULT;
+ }
+
+ if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
+ status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
+ return -EINVAL;
+ }
+
+ ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
+ status.buffer_size, &status.cols_filled);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
+ return ret;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_aie_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_metadata *meta;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->col_size = ndev->metadata.size;
+ meta->cols = ndev->metadata.cols;
+ meta->rows = ndev->metadata.rows;
+
+ meta->version.major = ndev->metadata.version.major;
+ meta->version.minor = ndev->metadata.version.minor;
+
+ meta->core.row_count = ndev->metadata.core.row_count;
+ meta->core.row_start = ndev->metadata.core.row_start;
+ meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
+ meta->core.lock_count = ndev->metadata.core.lock_count;
+ meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
+
+ meta->mem.row_count = ndev->metadata.mem.row_count;
+ meta->mem.row_start = ndev->metadata.mem.row_start;
+ meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
+ meta->mem.lock_count = ndev->metadata.mem.lock_count;
+ meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
+
+ meta->shim.row_count = ndev->metadata.shim.row_count;
+ meta->shim.row_start = ndev->metadata.shim.row_start;
+ meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
+ meta->shim.lock_count = ndev->metadata.shim.lock_count;
+ meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
+ ret = -EFAULT;
+
+ kfree(meta);
+ return ret;
+}
+
+static int aie2_get_aie_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ version.major = ndev->version.major;
+ version.minor = ndev->version.minor;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_firmware_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_firmware_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ version.major = xdna->fw_ver.major;
+ version.minor = xdna->fw_ver.minor;
+ version.patch = xdna->fw_ver.sub;
+ version.build = xdna->fw_ver.build;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_power_mode mode = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ mode.power_mode = ndev->pw_mode;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_clock_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_clock_metadata *clock;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return -ENOMEM;
+
+ snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
+ "MP-NPU Clock");
+ clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
+ snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
+ clock->h_clock.freq_mhz = ndev->hclk_freq;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
+ ret = -EFAULT;
+
+ kfree(clock);
+ return ret;
+}
+
+static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
+
+ if (!array_args->num_element)
+ return -EINVAL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pid = hwctx->client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
+ return -EFAULT;
+
+ array_args->buffer += size;
+ array_args->num_element--;
+
+ return 0;
+}
+
+static int aie2_get_hwctx_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
+ return 0;
+}
+
+static int aie2_query_resource_info(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_resource_info res_info;
+ const struct amdxdna_dev_priv *priv;
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+
+ xdna = client->xdna;
+ ndev = xdna->dev_handle;
+ priv = ndev->priv;
+
+ res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk;
+ res_info.npu_tops_max = ndev->max_tops;
+ res_info.npu_task_max = priv->hwctx_limit;
+ res_info.npu_tops_curr = ndev->curr_tops;
+ res_info.npu_task_curr = ndev->hwctx_num;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 *map = arg;
+
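+	/* map[] translates firmware context ids to user-visible context ids */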
+ if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) {
+ XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id,
+ xdna->dev_handle->priv->hwctx_limit);
+ return -EINVAL;
+ }
+
+ map[hwctx->fw_ctx_id] = hwctx->id;
+ return 0;
+}
+
+static int aie2_get_telemetry(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL;
+ u32 telemetry_data_sz, header_sz, elem_num;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ elem_num = xdna->dev_handle->priv->hwctx_limit;
+ header_sz = struct_size(header, map, elem_num);
+ if (args->buffer_size <= header_sz) {
+ XDNA_ERR(xdna, "Invalid buffer size");
+ return -EINVAL;
+ }
+
+ telemetry_data_sz = args->buffer_size - header_sz;
+ if (telemetry_data_sz > SZ_4M) {
+ XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz);
+ return -EINVAL;
+ }
+
+ header = kzalloc(header_sz, GFP_KERNEL);
+ if (!header)
+ return -ENOMEM;
+
+ if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) {
+ XDNA_ERR(xdna, "Failed to copy telemetry header from user");
+ return -EFAULT;
+ }
+
+ header->map_num_elements = elem_num;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &header->map,
+ aie2_fill_hwctx_map);
+ if (ret)
+ return ret;
+ }
+
+ ret = aie2_query_telemetry(xdna->dev_handle,
+ u64_to_user_ptr(args->buffer + header_sz),
+ telemetry_data_sz, header);
+ if (ret) {
+ XDNA_ERR(xdna, "Query telemetry failed ret %d", ret);
+ return ret;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) {
+ XDNA_ERR(xdna, "Copy header failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_preempt_state(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_attribute_state state = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
+ state.state = ndev->force_preempt_enabled;
+ else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
+ state.state = ndev->frame_boundary_preempt;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_QUERY_AIE_STATUS:
+ ret = aie2_get_aie_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_METADATA:
+ ret = aie2_get_aie_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_VERSION:
+ ret = aie2_get_aie_version(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
+ ret = aie2_get_clock_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
+ ret = aie2_get_hwctx_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
+ ret = aie2_get_firmware_version(client, args);
+ break;
+ case DRM_AMDXDNA_GET_POWER_MODE:
+ ret = aie2_get_power_mode(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_TELEMETRY:
+ ret = aie2_get_telemetry(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
+ ret = aie2_query_resource_info(client, args);
+ break;
+ case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
+ case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
+ ret = aie2_get_preempt_state(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+
+ amdxdna_pm_suspend_put(xdna);
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+dev_exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
+ XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
+ args->element_size, args->num_element);
+ return -EINVAL;
+ }
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return 0;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
+ ret = aie2_get_array_async_error(xdna->dev_handle, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+
+ amdxdna_pm_suspend_put(xdna);
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+dev_exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_set_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_drm_set_power_mode power_state;
+ enum amdxdna_power_mode_type power_mode;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
+ sizeof(power_state))) {
+ XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
+ return -EFAULT;
+ }
+
+ if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
+ return -EINVAL;
+
+ power_mode = power_state.power_mode;
+ if (power_mode > POWER_MODE_TURBO) {
+ XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
+ return -EINVAL;
+ }
+
+ return aie2_pm_set_mode(xdna->dev_handle, power_mode);
+}
+
+static int aie2_set_preempt_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
+ struct amdxdna_drm_attribute_state state;
+ u32 val;
+ int ret;
+
+ if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
+ return -EFAULT;
+
+ if (state.state > 1)
+ return -EINVAL;
+
+ if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
+ return -EINVAL;
+
+ if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
+ ndev->force_preempt_enabled = state.state;
+ } else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
+ val = state.state;
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
+ &val);
+ if (ret)
+ return ret;
+
+ ndev->frame_boundary_preempt = state.state;
+ }
+
+ return 0;
+}
+
+static int aie2_set_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_SET_POWER_MODE:
+ ret = aie2_set_power_mode(client, args);
+ break;
+ case DRM_AMDXDNA_SET_FORCE_PREEMPT:
+ case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
+ ret = aie2_set_preempt_state(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ amdxdna_pm_suspend_put(xdna);
+dev_exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+const struct amdxdna_dev_ops aie2_ops = {
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
+ .cmd_submit = aie2_cmd_submit,
+ .hmm_invalidate = aie2_hmm_invalidate,
+ .get_array = aie2_get_array,
+};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
new file mode 100644
index 000000000000..a5f9c42155d1
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_PCI_H_
+#define _AIE2_PCI_H_
+
+#include <drm/amdxdna_accel.h>
+#include <linux/semaphore.h>
+
+#include "amdxdna_mailbox.h"
+
+#define AIE2_INTERVAL 20000 /* us */
+#define AIE2_TIMEOUT 1000000 /* us */
+
+/* Firmware determines device memory base address and size */
+#define AIE2_DEVM_BASE 0x4000000
+#define AIE2_DEVM_SIZE SZ_64M
+
+#define NDEV2PDEV(ndev) (to_pci_dev((ndev)->xdna->ddev.dev))
+
+#define AIE2_SRAM_OFF(ndev, addr) ((addr) - (ndev)->priv->sram_dev_addr)
+#define AIE2_MBOX_OFF(ndev, addr) ((addr) - (ndev)->priv->mbox_dev_addr)
+
+#define PSP_REG_BAR(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].bar_idx)
+#define PSP_REG_OFF(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].offset)
+#define SRAM_REG_OFF(ndev, idx) ((ndev)->priv->sram_offs[(idx)].offset)
+
+#define SMU_REG(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->smu_base + (_ndev)->priv->smu_regs_off[(idx)].offset); \
+})
+#define SRAM_GET_ADDR(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->sram_base + SRAM_REG_OFF((_ndev), (idx))); \
+})
+
+#define CHAN_SLOT_SZ SZ_8K
+#define MBOX_SIZE(ndev) \
+({ \
+ typeof(ndev) _ndev = (ndev); \
+ ((_ndev)->priv->mbox_size) ? (_ndev)->priv->mbox_size : \
+ pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
+})
+
+enum aie2_smu_reg_idx {
+ SMU_CMD_REG = 0,
+ SMU_ARG_REG,
+ SMU_INTR_REG,
+ SMU_RESP_REG,
+ SMU_OUT_REG,
+ SMU_MAX_REGS /* Keep this at the end */
+};
+
+enum aie2_sram_reg_idx {
+ MBOX_CHANN_OFF = 0,
+ FW_ALIVE_OFF,
+ SRAM_MAX_INDEX /* Keep this at the end */
+};
+
+enum psp_reg_idx {
+ PSP_CMD_REG = 0,
+ PSP_ARG0_REG,
+ PSP_ARG1_REG,
+ PSP_ARG2_REG,
+ PSP_NUM_IN_REGS, /* number of input registers */
+ PSP_INTR_REG = PSP_NUM_IN_REGS,
+ PSP_STATUS_REG,
+ PSP_RESP_REG,
+ PSP_MAX_REGS /* Keep this at the end */
+};
+
+struct amdxdna_client;
+struct amdxdna_fw_ver;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+struct psp_config {
+ const void *fw_buf;
+ u32 fw_size;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+struct aie_version {
+ u16 major;
+ u16 minor;
+};
+
+struct aie_tile_metadata {
+ u16 row_count;
+ u16 row_start;
+ u16 dma_channel_count;
+ u16 lock_count;
+ u16 event_reg_count;
+};
+
+struct aie_metadata {
+ u32 size;
+ u16 cols;
+ u16 rows;
+ struct aie_version version;
+ struct aie_tile_metadata core;
+ struct aie_tile_metadata mem;
+ struct aie_tile_metadata shim;
+};
+
+enum rt_config_category {
+ AIE2_RT_CFG_INIT,
+ AIE2_RT_CFG_CLK_GATING,
+ AIE2_RT_CFG_FORCE_PREEMPT,
+ AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
+};
+
+struct rt_config {
+ u32 type;
+ u32 value;
+ u32 category;
+ unsigned long feature_mask;
+};
+
+struct dpm_clk_freq {
+ u32 npuclk;
+ u32 hclk;
+};
+
+/*
+ * Define the maximum number of pending commands in a hardware context.
+ * Must be power of 2!
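+ * get_job_idx() relies on this: with HWCTX_MAX_CMDS = 4, the AND mask
+ * maps seq values 0, 1, 2, 3, 4, 5 to slots 0, 1, 2, 3, 0, 1.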
+ */
+#define HWCTX_MAX_CMDS 4
+#define get_job_idx(seq) ((seq) & (HWCTX_MAX_CMDS - 1))
+struct amdxdna_hwctx_priv {
+ struct amdxdna_gem_obj *heap;
+ void *mbox_chann;
+
+ struct drm_gpu_scheduler sched;
+ struct drm_sched_entity entity;
+
+ struct mutex io_lock; /* protect seq and cmd order */
+ struct wait_queue_head job_free_wq;
+ u32 num_pending;
+ u64 seq;
+ struct semaphore job_sem;
+ bool job_done;
+
+ /* Completed job counter */
+ u64 completed;
+
+ struct amdxdna_gem_obj *cmd_buf[HWCTX_MAX_CMDS];
+ struct drm_syncobj *syncobj;
+};
+
+enum aie2_dev_status {
+ AIE2_DEV_UNINIT,
+ AIE2_DEV_INIT,
+ AIE2_DEV_START,
+};
+
+struct aie2_exec_msg_ops {
+ int (*init_cu_req)(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op);
+ int (*init_dpu_req)(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op);
+ void (*init_chain_req)(void *req, u64 slot_addr, size_t size, u32 cmd_cnt);
+ int (*fill_cf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_dpu_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_preempt_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_elf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ u32 (*get_chain_msg_op)(u32 cmd_op);
+};
+
+struct amdxdna_dev_hdl {
+ struct amdxdna_dev *xdna;
+ const struct amdxdna_dev_priv *priv;
+ void __iomem *sram_base;
+ void __iomem *smu_base;
+ void __iomem *mbox_base;
+ struct psp_device *psp_hdl;
+
+ struct xdna_mailbox_chann_res mgmt_x2i;
+ struct xdna_mailbox_chann_res mgmt_i2x;
+ u32 mgmt_chan_idx;
+ u32 mgmt_prot_major;
+ u32 mgmt_prot_minor;
+
+ u32 total_col;
+ struct aie_version version;
+ struct aie_metadata metadata;
+ unsigned long feature_mask;
+ struct aie2_exec_msg_ops *exec_msg_ops;
+
+	/* Power management and clock */
+ enum amdxdna_power_mode_type pw_mode;
+ u32 dpm_level;
+ u32 dft_dpm_level;
+ u32 max_dpm_level;
+ u32 clk_gating;
+ u32 npuclk_freq;
+ u32 hclk_freq;
+ u32 max_tops;
+ u32 curr_tops;
+ u32 force_preempt_enabled;
+ u32 frame_boundary_preempt;
+
+ /* Mailbox and the management channel */
+ struct mailbox *mbox;
+ struct mailbox_channel *mgmt_chann;
+ struct async_events *async_events;
+
+ enum aie2_dev_status dev_status;
+ u32 hwctx_num;
+
+ struct amdxdna_async_error last_async_err;
+};
+
+#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
+ [reg_name] = {bar##_BAR_INDEX, (reg_addr) - bar##_BAR_BASE}
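+/*
+ * For example (the register address is illustrative; see the npu*_regs.c
+ * files for the real tables):
+ *
+ *   DEFINE_BAR_OFFSET(SMU_CMD_REG, SMU, 0x03B10500)
+ *
+ * expands to
+ *
+ *   [SMU_CMD_REG] = {SMU_BAR_INDEX, 0x03B10500 - SMU_BAR_BASE}
+ */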
+
+struct aie2_bar_off_pair {
+ int bar_idx;
+ u32 offset;
+};
+
+struct aie2_hw_ops {
+ int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+};
+
+enum aie2_fw_feature {
+ AIE2_NPU_COMMAND,
+ AIE2_PREEMPT,
+ AIE2_FEATURE_MAX
+};
+
+struct aie2_fw_feature_tbl {
+ enum aie2_fw_feature feature;
+ u32 max_minor;
+ u32 min_minor;
+};
+
+#define AIE2_FEATURE_ON(ndev, feature) test_bit(feature, &(ndev)->feature_mask)
+
+struct amdxdna_dev_priv {
+ const char *fw_path;
+ u64 protocol_major;
+ u64 protocol_minor;
+ const struct rt_config *rt_config;
+ const struct dpm_clk_freq *dpm_clk_tbl;
+ const struct aie2_fw_feature_tbl *fw_feature_tbl;
+
+#define COL_ALIGN_NONE 0
+#define COL_ALIGN_NATURE 1
+ u32 col_align;
+ u32 mbox_dev_addr;
+ /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
+ u32 mbox_size;
+ u32 hwctx_limit;
+ u32 sram_dev_addr;
+ struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
+ struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
+ struct aie2_bar_off_pair smu_regs_off[SMU_MAX_REGS];
+ struct aie2_hw_ops hw_ops;
+};
+
+extern const struct amdxdna_dev_ops aie2_ops;
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val);
+
+/* aie2 npu hw config */
+extern const struct dpm_clk_freq npu1_dpm_clk_table[];
+extern const struct dpm_clk_freq npu4_dpm_clk_table[];
+extern const struct rt_config npu1_default_rt_cfg[];
+extern const struct rt_config npu4_default_rt_cfg[];
+extern const struct aie2_fw_feature_tbl npu4_fw_feature_table[];
+
+/* aie2_smu.c */
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+
+/* aie2_pm.c */
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);
+
+/* aie2_psp.c */
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
+int aie2_psp_start(struct psp_device *psp);
+void aie2_psp_stop(struct psp_device *psp);
+
+/* aie2_error.c */
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_msg_thread(void *data);
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_drm_get_array *args);
+
+/* aie2_message.c */
+void aie2_msg_init(struct amdxdna_dev_hdl *ndev);
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver);
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev,
+ char __user *buf, u32 size,
+ struct amdxdna_drm_query_telemetry_header *header);
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, void __iomem *, size_t));
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
+
+/* aie2_hwctx.c */
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
+void aie2_hwctx_suspend(struct amdxdna_client *client);
+int aie2_hwctx_resume(struct amdxdna_client *client);
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+
+#endif /* _AIE2_PCI_H_ */
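A rough consumer-side sketch of the firmware feature table declared above (assumed, not part of this patch): the probe-time code that fills feature_mask from fw_feature_tbl is taken for granted, and callers then gate optional paths with AIE2_FEATURE_ON(). enable_preemption() is a hypothetical helper.

        /* Sketch only; enable_preemption() is hypothetical */
        if (AIE2_FEATURE_ON(ndev, AIE2_PREEMPT))
                enable_preemption(ndev);
        else
                XDNA_DBG(ndev->xdna, "firmware lacks preemption support");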
diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
new file mode 100644
index 000000000000..426c38fce848
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define AIE2_CLK_GATING_ENABLE 1
+#define AIE2_CLK_GATING_DISABLE 0
+
+static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 val)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &val);
+ if (ret)
+ return ret;
+
+ ndev->clk_gating = val;
+ return 0;
+}
+
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ if (ndev->dev_status != AIE2_DEV_UNINIT) {
+ /* Resume device */
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ while (ndev->priv->dpm_clk_tbl[ndev->max_dpm_level].hclk)
+ ndev->max_dpm_level++;
+ ndev->max_dpm_level--;
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = POWER_MODE_DEFAULT;
+ ndev->dft_dpm_level = ndev->max_dpm_level;
+
+ return 0;
+}
+
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 clk_gating, dpm_level;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (ndev->pw_mode == target)
+ return 0;
+
+ switch (target) {
+ case POWER_MODE_TURBO:
+ if (ndev->hwctx_num) {
+ XDNA_ERR(xdna, "Can not set turbo when there is active hwctx");
+ return -EINVAL;
+ }
+
+ clk_gating = AIE2_CLK_GATING_DISABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_HIGH:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_DEFAULT:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->dft_dpm_level;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, clk_gating);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = target;
+
+ return 0;
+}
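aie2_pm_set_mode() asserts that dev_lock is held and refuses POWER_MODE_TURBO while any hardware context exists. A minimal caller sketch (assumed wiring; xdna and ndev come from the surrounding driver context, error handling elided):

        mutex_lock(&xdna->dev_lock);
        ret = aie2_pm_set_mode(ndev, POWER_MODE_HIGH);
        mutex_unlock(&xdna->dev_lock);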
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
new file mode 100644
index 000000000000..f28a060a8810
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+#define PSP_STATUS_READY BIT(31)
+
+/* PSP commands */
+#define PSP_VALIDATE 1
+#define PSP_START 2
+#define PSP_RELEASE_TMR 3
+
+/* PSP special arguments */
+#define PSP_START_COPY_FW 1
+
+/* PSP response error code */
+#define PSP_ERROR_CANCEL 0xFFFF0002
+#define PSP_ERROR_BAD_STATE 0xFFFF0007
+
+#define PSP_FW_ALIGN 0x10000
+#define PSP_POLL_INTERVAL 20000 /* us */
+#define PSP_POLL_TIMEOUT 1000000 /* us */
+
+#define PSP_REG(p, reg) ((p)->psp_regs[reg])
+
+struct psp_device {
+ struct drm_device *ddev;
+ struct psp_config conf;
+ u32 fw_buf_sz;
+ u64 fw_paddr;
+ void *fw_buffer;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+static int psp_exec(struct psp_device *psp, u32 *reg_vals)
+{
+ u32 resp_code;
+ int ret, i;
+ u32 ready;
+
+ /* Write command and argument registers */
+ for (i = 0; i < PSP_NUM_IN_REGS; i++)
+ writel(reg_vals[i], PSP_REG(psp, i));
+
+ /* clear and set PSP INTR register to kick off */
+ writel(0, PSP_REG(psp, PSP_INTR_REG));
+ writel(1, PSP_REG(psp, PSP_INTR_REG));
+
+ /* PSP should now be busy. Wait for the ready bit, so we know the task is done. */
+ ret = readx_poll_timeout(readl, PSP_REG(psp, PSP_STATUS_REG), ready,
+ FIELD_GET(PSP_STATUS_READY, ready),
+ PSP_POLL_INTERVAL, PSP_POLL_TIMEOUT);
+ if (ret) {
+ drm_err(psp->ddev, "PSP is not ready, ret 0x%x", ret);
+ return ret;
+ }
+
+ resp_code = readl(PSP_REG(psp, PSP_RESP_REG));
+ if (resp_code) {
+ drm_err(psp->ddev, "fw return error 0x%x", resp_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void aie2_psp_stop(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS] = { PSP_RELEASE_TMR, };
+ int ret;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret)
+ drm_err(psp->ddev, "release tmr failed, ret %d", ret);
+}
+
+int aie2_psp_start(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS];
+ int ret;
+
+ reg_vals[0] = PSP_VALIDATE;
+ reg_vals[1] = lower_32_bits(psp->fw_paddr);
+ reg_vals[2] = upper_32_bits(psp->fw_paddr);
+ reg_vals[3] = psp->fw_buf_sz;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to validate fw, ret %d", ret);
+ return ret;
+ }
+
+ memset(reg_vals, 0, sizeof(reg_vals));
+ reg_vals[0] = PSP_START;
+ reg_vals[1] = PSP_START_COPY_FW;
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to start fw, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf)
+{
+ struct psp_device *psp;
+ u64 offset;
+
+ psp = drmm_kzalloc(ddev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->ddev = ddev;
+ memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
+
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
+ if (!psp->fw_buffer) {
+ drm_err(ddev, "no memory for fw buffer");
+ return NULL;
+ }
+
+ /*
+ * The AMD Platform Security Processor (PSP) requires a host physical
+ * address to load the NPU firmware.
+ */
+ psp->fw_paddr = virt_to_phys(psp->fw_buffer);
+ offset = ALIGN(psp->fw_paddr, PSP_FW_ALIGN) - psp->fw_paddr;
+ psp->fw_paddr += offset;
+ memcpy(psp->fw_buffer + offset, conf->fw_buf, conf->fw_size);
+
+ return psp;
+}
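The firmware staging in aie2m_psp_create() over-allocates by PSP_FW_ALIGN and then rounds the physical address up inside the buffer. A worked example with illustrative addresses:

        /* PSP_FW_ALIGN = 0x10000; suppose virt_to_phys() gives 0x123458000 */
        offset   = ALIGN(0x123458000, 0x10000) - 0x123458000;  /* = 0x8000 */
        fw_paddr = 0x123458000 + 0x8000;                       /* = 0x123460000 */
        /* The image is copied to fw_buffer + 0x8000; the aligned copy still
         * fits because the allocation includes PSP_FW_ALIGN extra bytes. */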
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
new file mode 100644
index 000000000000..bd94ee96c2bc
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
+
+#define SMU_RESULT_OK 1
+
+/* SMU commands */
+#define AIE2_SMU_POWER_ON 0x3
+#define AIE2_SMU_POWER_OFF 0x4
+#define AIE2_SMU_SET_MPNPUCLK_FREQ 0x5
+#define AIE2_SMU_SET_HCLK_FREQ 0x6
+#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
+#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
+
+#define NPU4_DPM_TOPS(ndev, dpm_level) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ (4096 * (_ndev)->total_col * \
+ (_ndev)->priv->dpm_clk_tbl[dpm_level].hclk / 1000000); \
+})
+
+static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
+ u32 reg_arg, u32 *out)
+{
+ u32 resp;
+ int ret;
+
+ writel(0, SMU_REG(ndev, SMU_RESP_REG));
+ writel(reg_arg, SMU_REG(ndev, SMU_ARG_REG));
+ writel(reg_cmd, SMU_REG(ndev, SMU_CMD_REG));
+
+ /* Clear and set SMU_INTR_REG to kick off */
+ writel(0, SMU_REG(ndev, SMU_INTR_REG));
+ writel(1, SMU_REG(ndev, SMU_INTR_REG));
+
+ ret = readx_poll_timeout(readl, SMU_REG(ndev, SMU_RESP_REG), resp,
+ resp, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d timed out", reg_cmd);
+ return ret;
+ }
+
+ if (out)
+ *out = readl(SMU_REG(ndev, SMU_OUT_REG));
+
+ if (resp != SMU_RESULT_OK) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d failed, 0x%x", reg_cmd, resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ u32 freq;
+ int ret;
+
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ goto suspend_put;
+ }
+ ndev->npuclk_freq = freq;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ goto suspend_put;
+ }
+
+ amdxdna_pm_suspend_put(ndev->xdna);
+ ndev->hclk_freq = freq;
+ ndev->dpm_level = dpm_level;
+ ndev->max_tops = 2 * ndev->total_col;
+ ndev->curr_tops = ndev->max_tops * freq / 1028;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
+}
+
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ int ret;
+
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
+ dpm_level, ret);
+ goto suspend_put;
+ }
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
+ dpm_level, ret);
+ goto suspend_put;
+ }
+
+ amdxdna_pm_suspend_put(ndev->xdna);
+ ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
+ ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
+ ndev->dpm_level = dpm_level;
+ ndev->max_tops = NPU4_DPM_TOPS(ndev, ndev->max_dpm_level);
+ ndev->curr_tops = NPU4_DPM_TOPS(ndev, dpm_level);
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
+}
+
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ /*
+ * Failing to set power off indicates an unrecoverable hardware or
+ * firmware error.
+ */
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Access power failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ndev->priv->hw_ops.set_dpm(ndev, 0);
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret)
+ XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
+}
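The TOPS bookkeeping differs per NPU generation. For NPU4, NPU4_DPM_TOPS() derives the figure from column count and H-clock; a worked example with illustrative numbers (the real values come from the per-NPU clock tables):

        /* total_col = 4, dpm_clk_tbl[level].hclk = 1800 (MHz) */
        tops = 4096 * 4 * 1800 / 1000000;       /* = 29, integer division */
        /* npu1 instead scales a fixed 2 TOPS per column by freq / 1028,
         * 1028 MHz appearing to be that part's top H-clock */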
diff --git a/drivers/accel/amdxdna/aie2_solver.c b/drivers/accel/amdxdna/aie2_solver.c
new file mode 100644
index 000000000000..2013d1f13aae
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include "aie2_solver.h"
+
+struct partition_node {
+ struct list_head list;
+ u32 nshared; /* # shared requests */
+ u32 start_col; /* start column */
+ u32 ncols; /* # columns */
+ bool exclusive; /* can not be shared if set */
+};
+
+struct solver_node {
+ struct list_head list;
+ u64 rid; /* Request ID from consumer */
+
+ struct partition_node *pt_node;
+ void *cb_arg;
+ u32 dpm_level;
+ u32 cols_len;
+ u32 start_cols[] __counted_by(cols_len);
+};
+
+struct solver_rgroup {
+ u32 rgid;
+ u32 nnode;
+ u32 npartition_node;
+
+ DECLARE_BITMAP(resbit, XRS_MAX_COL);
+ struct list_head node_list;
+ struct list_head pt_node_list;
+};
+
+struct solver_state {
+ struct solver_rgroup rgp;
+ struct init_config cfg;
+ struct xrs_action_ops *actions;
+};
+
+static u32 calculate_gops(struct aie_qos *rqos)
+{
+ u32 service_rate = 0;
+
+ if (rqos->latency)
+ service_rate = (1000 / rqos->latency);
+
+ if (rqos->fps > service_rate)
+ return rqos->fps * rqos->gops;
+
+ return service_rate * rqos->gops;
+}
+
+/*
+ * qos_meet() - Check whether the QoS request can be met.
+ */
+static int qos_meet(struct solver_state *xrs, struct aie_qos *rqos, u32 cgops)
+{
+ u32 request_gops = calculate_gops(rqos) * xrs->cfg.sys_eff_factor;
+
+ if (request_gops <= cgops)
+ return 0;
+
+ return -EINVAL;
+}
+
+/*
+ * sanity_check() - Do a basic sanity check on allocation request.
+ */
+static int sanity_check(struct solver_state *xrs, struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 cu_clk_freq;
+
+ if (cdop->ncols > xrs->cfg.total_col)
+ return -EINVAL;
+
+ /*
+ * Check that at least one CDO group can meet the GOPs
+ * requirement at the highest available clock.
+ */
+ cu_clk_freq = xrs->cfg.clk_list.cu_clk_list[xrs->cfg.clk_list.num_levels - 1];
+
+ if (qos_meet(xrs, rqos, cdop->qos_cap.opc * cu_clk_freq / 1000))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_qos_dpm_params(struct aie_qos *rqos)
+{
+ /*
+ * gops is retrieved from the xmodel, so it is always set.
+ * fps and latency are the configurable parameters from the application.
+ */
+ if (rqos->gops > 0 && (rqos->fps > 0 || rqos->latency > 0))
+ return true;
+
+ return false;
+}
+
+static int set_dpm_level(struct solver_state *xrs, struct alloc_requests *req, u32 *dpm_level)
+{
+ struct solver_rgroup *rgp = &xrs->rgp;
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 freq, max_dpm_level, level;
+ struct solver_node *node;
+
+ max_dpm_level = xrs->cfg.clk_list.num_levels - 1;
+ /* If no QoS parameters are passed, set it to the max DPM level */
+ if (!is_valid_qos_dpm_params(rqos)) {
+ level = max_dpm_level;
+ goto set_dpm;
+ }
+
+ /* Find one CDO group that meets the GOPs requirement. */
+ for (level = 0; level < max_dpm_level; level++) {
+ freq = xrs->cfg.clk_list.cu_clk_list[level];
+ if (!qos_meet(xrs, rqos, cdop->qos_cap.opc * freq / 1000))
+ break;
+ }
+
+ /* Pick the highest DPM level across sessions so every requirement is met */
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->dpm_level > level)
+ level = node->dpm_level;
+ }
+
+set_dpm:
+ *dpm_level = level;
+ return xrs->cfg.actions->set_dft_dpm_level(xrs->cfg.ddev, level);
+}
+
+static struct solver_node *rg_search_node(struct solver_rgroup *rgp, u64 rid)
+{
+ struct solver_node *node;
+
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->rid == rid)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void remove_partition_node(struct solver_rgroup *rgp,
+ struct partition_node *pt_node)
+{
+ pt_node->nshared--;
+ if (pt_node->nshared > 0)
+ return;
+
+ list_del(&pt_node->list);
+ rgp->npartition_node--;
+
+ bitmap_clear(rgp->resbit, pt_node->start_col, pt_node->ncols);
+ kfree(pt_node);
+}
+
+static void remove_solver_node(struct solver_rgroup *rgp,
+ struct solver_node *node)
+{
+ list_del(&node->list);
+ rgp->nnode--;
+
+ if (node->pt_node)
+ remove_partition_node(rgp, node->pt_node);
+
+ kfree(node);
+}
+
+static int get_free_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node;
+ u32 ncols = req->cdo.ncols;
+ u32 col, i;
+
+ for (i = 0; i < snode->cols_len; i++) {
+ col = snode->start_cols[i];
+ if (find_next_bit(xrs->rgp.resbit, XRS_MAX_COL, col) >= col + ncols)
+ break;
+ }
+
+ if (i == snode->cols_len)
+ return -ENODEV;
+
+ pt_node = kzalloc(sizeof(*pt_node), GFP_KERNEL);
+ if (!pt_node)
+ return -ENOMEM;
+
+ pt_node->nshared = 1;
+ pt_node->start_col = col;
+ pt_node->ncols = ncols;
+
+ /*
+ * Always set exclusive to false for now.
+ */
+ pt_node->exclusive = false;
+
+ list_add_tail(&pt_node->list, &xrs->rgp.pt_node_list);
+ xrs->rgp.npartition_node++;
+ bitmap_set(xrs->rgp.resbit, pt_node->start_col, pt_node->ncols);
+
+ snode->pt_node = pt_node;
+
+ return 0;
+}
+
+static int allocate_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node, *rpt_node = NULL;
+ int idx, ret;
+
+ ret = get_free_partition(xrs, snode, req);
+ if (!ret)
+ return ret;
+
+ /* Try to get a shareable partition */
+ list_for_each_entry(pt_node, &xrs->rgp.pt_node_list, list) {
+ if (pt_node->exclusive)
+ continue;
+
+ if (rpt_node && pt_node->nshared >= rpt_node->nshared)
+ continue;
+
+ for (idx = 0; idx < snode->cols_len; idx++) {
+ if (snode->start_cols[idx] != pt_node->start_col)
+ continue;
+
+ if (req->cdo.ncols != pt_node->ncols)
+ continue;
+
+ rpt_node = pt_node;
+ break;
+ }
+ }
+
+ if (!rpt_node)
+ return -ENODEV;
+
+ rpt_node->nshared++;
+ snode->pt_node = rpt_node;
+
+ return 0;
+}
+
+static struct solver_node *create_solver_node(struct solver_state *xrs,
+ struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct solver_node *node;
+ int ret;
+
+ node = kzalloc(struct_size(node, start_cols, cdop->cols_len), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->rid = req->rid;
+ node->cols_len = cdop->cols_len;
+ memcpy(node->start_cols, cdop->start_cols, cdop->cols_len * sizeof(u32));
+
+ ret = allocate_partition(xrs, node, req);
+ if (ret)
+ goto free_node;
+
+ list_add_tail(&node->list, &xrs->rgp.node_list);
+ xrs->rgp.nnode++;
+ return node;
+
+free_node:
+ kfree(node);
+ return ERR_PTR(ret);
+}
+
+static void fill_load_action(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct xrs_action_load *action)
+{
+ action->rid = snode->rid;
+ action->part.start_col = snode->pt_node->start_col;
+ action->part.ncols = snode->pt_node->ncols;
+}
+
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
+{
+ struct xrs_action_load load_act;
+ struct solver_node *snode;
+ struct solver_state *xrs;
+ u32 dpm_level;
+ int ret;
+
+ xrs = (struct solver_state *)hdl;
+
+ ret = sanity_check(xrs, req);
+ if (ret) {
+ drm_err(xrs->cfg.ddev, "invalid request");
+ return ret;
+ }
+
+ if (rg_search_node(&xrs->rgp, req->rid)) {
+ drm_err(xrs->cfg.ddev, "rid %lld is in-use", req->rid);
+ return -EEXIST;
+ }
+
+ snode = create_solver_node(xrs, req);
+ if (IS_ERR(snode))
+ return PTR_ERR(snode);
+
+ fill_load_action(xrs, snode, &load_act);
+ ret = xrs->cfg.actions->load(cb_arg, &load_act);
+ if (ret)
+ goto free_node;
+
+ ret = set_dpm_level(xrs, req, &dpm_level);
+ if (ret)
+ goto free_node;
+
+ snode->dpm_level = dpm_level;
+ snode->cb_arg = cb_arg;
+
+ drm_dbg(xrs->cfg.ddev, "start col %d ncols %d\n",
+ snode->pt_node->start_col, snode->pt_node->ncols);
+
+ return 0;
+
+free_node:
+ remove_solver_node(&xrs->rgp, snode);
+
+ return ret;
+}
+
+int xrs_release_resource(void *hdl, u64 rid)
+{
+ struct solver_state *xrs = hdl;
+ struct solver_node *node;
+
+ node = rg_search_node(&xrs->rgp, rid);
+ if (!node) {
+ drm_err(xrs->cfg.ddev, "node not exist");
+ return -ENODEV;
+ }
+
+ xrs->cfg.actions->unload(node->cb_arg);
+ remove_solver_node(&xrs->rgp, node);
+
+ return 0;
+}
+
+void *xrsm_init(struct init_config *cfg)
+{
+ struct solver_rgroup *rgp;
+ struct solver_state *xrs;
+
+ xrs = drmm_kzalloc(cfg->ddev, sizeof(*xrs), GFP_KERNEL);
+ if (!xrs)
+ return NULL;
+
+ memcpy(&xrs->cfg, cfg, sizeof(*cfg));
+
+ rgp = &xrs->rgp;
+ INIT_LIST_HEAD(&rgp->node_list);
+ INIT_LIST_HEAD(&rgp->pt_node_list);
+
+ return xrs;
+}
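The free-partition test in get_free_partition() relies on find_next_bit() returning the first busy column at or after the candidate start; a start column is usable only if that first busy column lies past the requested range. A small worked example (illustrative bitmap):

        /* resbit: columns 3 and 4 busy, request ncols = 2 */
        /* start col 0: find_next_bit(resbit, XRS_MAX_COL, 0) = 3 >= 0 + 2 -> free    */
        /* start col 2: find_next_bit(resbit, XRS_MAX_COL, 2) = 3 <  2 + 2 -> overlap */
        /* Columns are only marked busy (bitmap_set) once the node is committed. */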
diff --git a/drivers/accel/amdxdna/aie2_solver.h b/drivers/accel/amdxdna/aie2_solver.h
new file mode 100644
index 000000000000..a2e3c52229e9
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_SOLVER_H
+#define _AIE2_SOLVER_H
+
+#define XRS_MAX_COL 128
+
+/*
+ * Structure used to describe a partition. A partition is a column-based
+ * allocation unit described by its start column and number of columns.
+ */
+struct aie_part {
+ u32 start_col;
+ u32 ncols;
+};
+
+/*
+ * The QoS capabilities of a given AIE partition.
+ */
+struct aie_qos_cap {
+ u32 opc; /* operations per cycle */
+ u32 dma_bw; /* DMA bandwidth */
+};
+
+/*
+ * QoS requirement of a resource allocation.
+ */
+struct aie_qos {
+ u32 gops; /* Giga operations */
+ u32 fps; /* Frames per second */
+ u32 dma_bw; /* DMA bandwidth */
+ u32 latency; /* Frame response latency */
+ u32 exec_time; /* Frame execution time */
+ u32 priority; /* Request priority */
+};
+
+/*
+ * Structure used to describe a relocatable CDO (Configuration Data Object).
+ */
+struct cdo_parts {
+ u32 *start_cols; /* Start column array */
+ u32 cols_len; /* Length of start column array */
+ u32 ncols; /* # of columns */
+ struct aie_qos_cap qos_cap; /* CDO QoS capabilities */
+};
+
+/*
+ * Structure used to describe a request to allocate.
+ */
+struct alloc_requests {
+ u64 rid;
+ struct cdo_parts cdo;
+ struct aie_qos rqos; /* Requested QoS */
+};
+
+/*
+ * Load callback argument
+ */
+struct xrs_action_load {
+ u32 rid;
+ struct aie_part part;
+};
+
+/*
+ * Define the power level available
+ *
+ * POWER_LEVEL_MIN:
+ * Lowest power level. Usually set when all actions are unloaded.
+ *
+ * POWER_LEVEL_n:
+ * Power levels 0 - n; each step is an increase in system frequencies.
+ */
+enum power_level {
+ POWER_LEVEL_MIN = 0x0,
+ POWER_LEVEL_0 = 0x1,
+ POWER_LEVEL_1 = 0x2,
+ POWER_LEVEL_2 = 0x3,
+ POWER_LEVEL_3 = 0x4,
+ POWER_LEVEL_4 = 0x5,
+ POWER_LEVEL_5 = 0x6,
+ POWER_LEVEL_6 = 0x7,
+ POWER_LEVEL_7 = 0x8,
+ POWER_LEVEL_NUM,
+};
+
+/*
+ * Structure used to describe the frequency table.
+ * Resource solver chooses the frequency from the table
+ * to meet the QOS requirements.
+ */
+struct clk_list_info {
+ u32 num_levels; /* available power levels */
+ u32 cu_clk_list[POWER_LEVEL_NUM]; /* available AIE clock frequencies in MHz */
+};
+
+struct xrs_action_ops {
+ int (*load)(void *cb_arg, struct xrs_action_load *action);
+ int (*unload)(void *cb_arg);
+ int (*set_dft_dpm_level)(struct drm_device *ddev, u32 level);
+};
+
+/*
+ * Structure used to describe information for solver during initialization.
+ */
+struct init_config {
+ u32 total_col;
+ u32 sys_eff_factor; /* system efficiency factor */
+ u32 latency_adj; /* latency adjustment in ms */
+ struct clk_list_info clk_list; /* List of frequencies available in system */
+ struct drm_device *ddev;
+ struct xrs_action_ops *actions;
+};
+
+/*
+ * xrsm_init() - Register resource solver. Resource solver client needs
+ * to call this function to register itself.
+ *
+ * @cfg: The system metrics for resource solver to use
+ *
+ * Return: A resource solver handle
+ *
+ * Note: We should only create one handle per AIE array to be managed.
+ */
+void *xrsm_init(struct init_config *cfg);
+
+/*
+ * xrs_allocate_resource() - Request to allocate resources for a given context
+ * and partition metadata (see struct alloc_requests).
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @req: Input to the Resource solver including request id
+ * and partition metadata.
+ * @cb_arg: callback argument pointer
+ *
+ * Return: 0 when successful.
+ * Or standard error number when failing
+ *
+ * Note:
+ * The resource solver does no locking internally, so it is the
+ * caller's responsibility to lock down XCLBINs and take any
+ * necessary locks.
+ */
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg);
+
+/*
+ * xrs_release_resource() - Request to free resources for a given context.
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @rid: The Request ID to identify the requesting context
+ */
+int xrs_release_resource(void *hdl, u64 rid);
+#endif /* _AIE2_SOLVER_H */
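Pulling the pieces of this header together, a minimal usage sketch of the solver API (caller-side names such as my_xrs_actions and my_cb_arg are hypothetical, values illustrative, error handling elided):

        struct init_config cfg = {
                .total_col = 8,                 /* illustrative */
                .sys_eff_factor = 1,
                .ddev = ddev,
                .actions = &my_xrs_actions,     /* hypothetical ops table */
        };
        u32 start_cols[] = { 0, 2, 4 };
        struct alloc_requests req = {
                .rid = 1,
                .cdo = { .start_cols = start_cols, .cols_len = 3, .ncols = 2 },
        };
        void *hdl = xrsm_init(&cfg);

        ret = xrs_allocate_resource(hdl, &req, my_cb_arg);
        /* ... run workload ... */
        ret = xrs_release_resource(hdl, 1);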
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
new file mode 100644
index 000000000000..d17aef89a0ad
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define MAX_HWCTX_ID 255
+#define MAX_ARG_COUNT 4095
+
+struct amdxdna_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* for base */
+ struct amdxdna_hwctx *hwctx;
+};
+
+static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
+{
+ return KBUILD_MODNAME;
+}
+
+static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct amdxdna_fence *xdna_fence;
+
+ xdna_fence = container_of(fence, struct amdxdna_fence, base);
+
+ return xdna_fence->hwctx->name;
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = amdxdna_fence_get_driver_name,
+ .get_timeline_name = amdxdna_fence_get_timeline_name,
+};
+
+static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->hwctx = hwctx;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
+ return &fence->base;
+}
+
+static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
+ struct srcu_struct *ss)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ synchronize_srcu(ss);
+
+ /* At this point, the user can no longer submit new commands */
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+
+ kfree(hwctx->name);
+ kfree(hwctx);
+}
+
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ ret = walk(hwctx, arg);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+
+ return ret;
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, count;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ num_masks = 0;
+ else
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+ if (unlikely(count <= num_masks)) {
+ *size = 0;
+ return NULL;
+ }
+ *size = (count - num_masks) * sizeof(u32);
+ }
+ return &cmd->data[num_masks];
+}
+
+u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, i;
+ u32 *cu_mask;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ return INVALID_CU_IDX;
+
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+ cu_mask = cmd->data;
+ for (i = 0; i < num_masks; i++) {
+ if (cu_mask[i])
+ return ffs(cu_mask[i]) - 1;
+ }
+
+ return INVALID_CU_IDX;
+}
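For concreteness, a worked decode of the CU mask walk above (values illustrative):

        /* cu_mask[0] = 0x0, cu_mask[1] = 0x10: the empty first mask is
         * skipped and ffs(0x10) - 1 = 4 is returned; an ERT_CMD_CHAIN
         * command carries no CU masks, hence INVALID_CU_IDX. */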
+
+/*
+ * This should be called in close() and remove(). DO NOT call in other syscalls.
+ * It guarantees that the hwctx and its resources are released even if the
+ * user never calls amdxdna_drm_destroy_hwctx_ioctl.
+ */
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ XDNA_DBG(client->xdna, "PID %d close HW context %d",
+ client->pid, hwctx->id);
+ xa_erase(&client->hwctx_xa, hwctx->id);
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+ }
+}
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_create_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
+ if (!hwctx)
+ return -ENOMEM;
+
+ if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
+ XDNA_ERR(xdna, "Access QoS info failed");
+ kfree(hwctx);
+ return -EFAULT;
+ }
+
+ hwctx->client = client;
+ hwctx->fw_ctx_id = -1;
+ hwctx->num_tiles = args->num_tiles;
+ hwctx->mem_size = args->mem_size;
+ hwctx->max_opc = args->max_opc;
+
+ guard(mutex)(&xdna->dev_lock);
+
+ if (!drm_dev_enter(dev, &idx)) {
+ ret = -ENODEV;
+ goto free_hwctx;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto dev_exit;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
+ if (!hwctx->name) {
+ ret = -ENOMEM;
+ goto fini_hwctx;
+ }
+
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+ goto free_name;
+ }
+
+ args->handle = hwctx->id;
+ args->syncobj_handle = hwctx->syncobj_hdl;
+
+ atomic64_set(&hwctx->job_submit_cnt, 0);
+ atomic64_set(&hwctx->job_free_cnt, 0);
+ XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
+ drm_dev_exit(idx);
+ return 0;
+
+free_name:
+ kfree(hwctx->name);
+fini_hwctx:
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+dev_exit:
+ drm_dev_exit(idx);
+free_hwctx:
+ kfree(hwctx);
+ return ret;
+}
+
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_destroy_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret = 0, idx;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ mutex_lock(&xdna->dev_lock);
+ hwctx = xa_erase(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ ret = -EINVAL;
+ XDNA_DBG(xdna, "PID %d HW context %d not exist",
+ client->pid, args->handle);
+ goto out;
+ }
+
+ /*
+ * The pushed jobs are handled by the DRM scheduler during destroy.
+ * SRCU synchronizes this with the exec command ioctls.
+ */
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+
+ XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
+out:
+ mutex_unlock(&xdna->dev_lock);
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_config_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+ u32 buf_size;
+ void *buf;
+ u64 val;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!xdna->dev_info->ops->hwctx_config)
+ return -EOPNOTSUPP;
+
+ val = args->param_val;
+ buf_size = args->param_val_size;
+
+ switch (args->param_type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ /* For these types, param_val is a pointer */
+ if (buf_size > PAGE_SIZE) {
+ XDNA_ERR(xdna, "Config CU param buffer too large");
+ return -E2BIG;
+ }
+
+ /* Hwctx needs to keep buf */
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ break;
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ /* For these types, param_val is a plain value */
+ buf = NULL;
+ buf_size = 0;
+ break;
+ default:
+ XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ mutex_unlock(&xdna->dev_lock);
+ kfree(buf);
+ return ret;
+}
+
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret, idx;
+
+ if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
+ return -EOPNOTSUPP;
+
+ gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
+ if (!gobj)
+ return -EINVAL;
+
+ abo = to_xdna_obj(gobj);
+ guard(mutex)(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
+ if (!hwctx) {
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+static void
+amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ if (!job->bos[i])
+ break;
+ drm_gem_object_put(job->bos[i]);
+ }
+}
+
+static int
+amdxdna_arg_bos_lookup(struct amdxdna_client *client,
+ struct amdxdna_sched_job *job,
+ u32 *bo_hdls, u32 bo_cnt)
+{
+ struct drm_gem_object *gobj;
+ int i, ret;
+
+ job->bo_cnt = bo_cnt;
+ for (i = 0; i < job->bo_cnt; i++) {
+ struct amdxdna_gem_obj *abo;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
+ if (!gobj) {
+ ret = -ENOENT;
+ goto put_shmem_bo;
+ }
+ abo = to_xdna_obj(gobj);
+
+ mutex_lock(&abo->lock);
+ if (abo->pinned) {
+ mutex_unlock(&abo->lock);
+ job->bos[i] = gobj;
+ continue;
+ }
+
+ ret = amdxdna_gem_pin_nolock(abo);
+ if (ret) {
+ mutex_unlock(&abo->lock);
+ drm_gem_object_put(gobj);
+ goto put_shmem_bo;
+ }
+ abo->pinned = true;
+ mutex_unlock(&abo->lock);
+
+ job->bos[i] = gobj;
+ }
+
+ return 0;
+
+put_shmem_bo:
+ amdxdna_arg_bos_put(job);
+ return ret;
+}
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
+{
+ trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
+ amdxdna_arg_bos_put(job);
+ amdxdna_gem_put_obj(job->cmd_bo);
+ dma_fence_put(job->fence);
+}
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ struct amdxdna_drv_cmd *drv_cmd,
+ u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_sched_job *job;
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
+ job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ job->drv_cmd = drv_cmd;
+
+ if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
+ job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
+ if (!job->cmd_bo) {
+ XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
+ ret = -EINVAL;
+ goto free_job;
+ }
+ }
+
+ ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
+ if (ret) {
+ XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
+ goto cmd_put;
+ }
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
+ client->pid, hwctx_hdl);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ job->hwctx = hwctx;
+ job->mm = current->mm;
+
+ job->fence = amdxdna_fence_create(hwctx);
+ if (!job->fence) {
+ XDNA_ERR(xdna, "Failed to create fence");
+ ret = -ENOMEM;
+ goto unlock_srcu;
+ }
+ kref_init(&job->refcnt);
+
+ ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
+ if (ret)
+ goto put_fence;
+
+ /*
+ * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
+ * resources only after synchronize_srcu(). The submitted jobs are
+ * handled by the queue, for example the DRM scheduler, in the device
+ * layer, so it is safe to unlock SRCU here.
+ */
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
+
+ return 0;
+
+put_fence:
+ dma_fence_put(job->fence);
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ amdxdna_arg_bos_put(job);
+cmd_put:
+ amdxdna_gem_put_obj(job->cmd_bo);
+free_job:
+ kfree(job);
+ return ret;
+}
+
+/*
+ * The submit command ioctl submits a command to firmware. One firmware command
+ * may contain multiple command BOs for processing as a whole.
+ * The command sequence number is returned which can be used for wait command ioctl.
+ */
+static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
+ struct amdxdna_drm_exec_cmd *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ u32 *arg_bo_hdls = NULL;
+ u32 cmd_bo_hdl;
+ int ret;
+
+ if (args->arg_count > MAX_ARG_COUNT) {
+ XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
+ return -EINVAL;
+ }
+
+ /* Only support single command for now. */
+ if (args->cmd_count != 1) {
+ XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
+ return -EINVAL;
+ }
+
+ cmd_bo_hdl = (u32)args->cmd_handles;
+ if (args->arg_count) {
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
+ }
+
+ ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
+ args->arg_count, args->hwctx, &args->seq);
+ if (ret)
+ XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
+
+free_cmd_bo_hdls:
+ kfree(arg_bo_hdls);
+ if (!ret)
+ XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
+ return ret;
+}
+
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_exec_cmd *args = data;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ switch (args->type) {
+ case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
+ return amdxdna_drm_submit_execbuf(client, args);
+ }
+
+ XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
+ return -EINVAL;
+}
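The lookup/teardown discipline used throughout this file reduces to the sketch below (use_hwctx() is a hypothetical consumer): readers hold only the SRCU read lock across the XArray lookup and use, while the destroy path erases the entry first and then waits in synchronize_srcu() before freeing, so no reader can still hold the pointer.

        idx = srcu_read_lock(&client->hwctx_srcu);
        hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
        if (hwctx)
                ret = use_hwctx(hwctx);         /* hypothetical consumer */
        srcu_read_unlock(&client->hwctx_srcu, idx);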
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
new file mode 100644
index 000000000000..b6151244d64f
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_CTX_H_
+#define _AMDXDNA_CTX_H_
+
+#include <linux/bitfield.h>
+
+#include "amdxdna_gem.h"
+
+struct amdxdna_hwctx_priv;
+
+enum ert_cmd_opcode {
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+ ERT_START_NPU_PREEMPT = 21,
+ ERT_START_NPU_PREEMPT_ELF = 22,
+ ERT_INVALID_CMD = ~0U,
+};
+
+enum ert_cmd_state {
+ ERT_CMD_STATE_INVALID,
+ ERT_CMD_STATE_NEW,
+ ERT_CMD_STATE_QUEUED,
+ ERT_CMD_STATE_RUNNING,
+ ERT_CMD_STATE_COMPLETED,
+ ERT_CMD_STATE_ERROR,
+ ERT_CMD_STATE_ABORT,
+ ERT_CMD_STATE_SUBMITTED,
+ ERT_CMD_STATE_TIMEOUT,
+ ERT_CMD_STATE_NORESPONSE,
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_start_npu {
+ u64 buffer; /* instruction buffer address */
+ u32 buffer_size; /* size of buffer in bytes */
+ u32 prop_count; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
+ */
+struct amdxdna_cmd_chain {
+ u32 command_count;
+ u32 submit_index;
+ u32 error_index;
+ u32 reserved[3];
+ u64 data[] __counted_by(command_count);
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU_PREEMPT in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_preempt_data {
+ u64 inst_buf; /* instruction buffer address */
+ u64 save_buf; /* save buffer address */
+ u64 restore_buf; /* restore buffer address */
+ u32 inst_size; /* size of instruction buffer in bytes */
+ u32 save_size; /* size of save buffer in bytes */
+ u32 restore_size; /* size of restore buffer in bytes */
+ u32 inst_prop_cnt; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
+/* Exec buffer command header format */
+#define AMDXDNA_CMD_STATE GENMASK(3, 0)
+#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
+#define AMDXDNA_CMD_COUNT GENMASK(22, 12)
+#define AMDXDNA_CMD_OPCODE GENMASK(27, 23)
+struct amdxdna_cmd {
+ u32 header;
+ u32 data[];
+};
+
+#define INVALID_CU_IDX (~0U)
+
+struct amdxdna_hwctx {
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx_priv *priv;
+ char *name;
+
+ u32 id;
+ u32 max_opc;
+ u32 num_tiles;
+ u32 mem_size;
+ u32 fw_ctx_id;
+ u32 col_list_len;
+ u32 *col_list;
+ u32 start_col;
+ u32 num_col;
+#define HWCTX_STAT_INIT 0
+#define HWCTX_STAT_READY 1
+#define HWCTX_STAT_STOP 2
+ u32 status;
+ u32 old_status;
+
+ struct amdxdna_qos_info qos;
+ struct amdxdna_hwctx_param_config_cu *cus;
+ u32 syncobj_hdl;
+
+ atomic64_t job_submit_cnt;
+ atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
+};
+
+#define drm_job_to_xdna_job(j) \
+ container_of(j, struct amdxdna_sched_job, base)
+
+enum amdxdna_job_opcode {
+ SYNC_DEBUG_BO,
+ ATTACH_DEBUG_BO,
+ DETACH_DEBUG_BO,
+};
+
+struct amdxdna_drv_cmd {
+ enum amdxdna_job_opcode opcode;
+ u32 result;
+};
+
+struct amdxdna_sched_job {
+ struct drm_sched_job base;
+ struct kref refcnt;
+ struct amdxdna_hwctx *hwctx;
+ struct mm_struct *mm;
+ /* The fence to notify the DRM scheduler that the job is done by hardware */
+ struct dma_fence *fence;
+ /* User space can wait on this fence */
+ struct dma_fence *out_fence;
+ bool job_done;
+ bool job_timeout;
+ u64 seq;
+ struct amdxdna_drv_cmd *drv_cmd;
+ struct amdxdna_gem_obj *cmd_bo;
+ size_t bo_cnt;
+ struct drm_gem_object *bos[] __counted_by(bo_cnt);
+};
+
+static inline u32
+amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
+}
+
+static inline void
+amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ cmd->header &= ~AMDXDNA_CMD_STATE;
+ cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
+}
+
+static inline enum ert_cmd_state
+amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
+u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl);
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls,
+ u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq);
+
+int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
+ u64 seq, u32 timeout);
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_CTX_H_ */
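As an illustration of the header bitfield layout above (not code from the patch), composing an exec buffer header for a plain CU start with a 16-word payload and no extra CU masks:

        u32 header = FIELD_PREP(AMDXDNA_CMD_OPCODE, ERT_START_CU) |
                     FIELD_PREP(AMDXDNA_CMD_COUNT, 16) |
                     FIELD_PREP(AMDXDNA_CMD_EXTRA_CU_MASK, 0) |
                     FIELD_PREP(AMDXDNA_CMD_STATE, ERT_CMD_STATE_NEW);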
diff --git a/drivers/accel/amdxdna/amdxdna_error.h b/drivers/accel/amdxdna/amdxdna_error.h
new file mode 100644
index 000000000000..c51de86ec12b
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_error.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_ERROR_H_
+#define _AMDXDNA_ERROR_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define AMDXDNA_ERR_DRV_AIE 4
+#define AMDXDNA_ERR_SEV_CRITICAL 3
+#define AMDXDNA_ERR_CLASS_AIE 2
+
+#define AMDXDNA_ERR_NUM_MASK GENMASK_U64(15, 0)
+#define AMDXDNA_ERR_DRV_MASK GENMASK_U64(23, 16)
+#define AMDXDNA_ERR_SEV_MASK GENMASK_U64(31, 24)
+#define AMDXDNA_ERR_MOD_MASK GENMASK_U64(39, 32)
+#define AMDXDNA_ERR_CLASS_MASK GENMASK_U64(47, 40)
+
+enum amdxdna_error_num {
+ AMDXDNA_ERROR_NUM_AIE_SATURATION = 3,
+ AMDXDNA_ERROR_NUM_AIE_FP,
+ AMDXDNA_ERROR_NUM_AIE_STREAM,
+ AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ AMDXDNA_ERROR_NUM_AIE_BUS,
+ AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ AMDXDNA_ERROR_NUM_AIE_ECC,
+ AMDXDNA_ERROR_NUM_AIE_LOCK,
+ AMDXDNA_ERROR_NUM_AIE_DMA,
+ AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ AMDXDNA_ERROR_NUM_UNKNOWN = 15,
+};
+
+enum amdxdna_error_module {
+ AMDXDNA_ERROR_MODULE_AIE_CORE = 3,
+ AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ AMDXDNA_ERROR_MODULE_AIE_SHIM,
+ AMDXDNA_ERROR_MODULE_AIE_NOC,
+ AMDXDNA_ERROR_MODULE_AIE_PL,
+ AMDXDNA_ERROR_MODULE_UNKNOWN = 8,
+};
+
+#define AMDXDNA_ERROR_ENCODE(err_num, err_mod) \
+ (FIELD_PREP(AMDXDNA_ERR_NUM_MASK, err_num) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_DRV_MASK, AMDXDNA_ERR_DRV_AIE) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_SEV_MASK, AMDXDNA_ERR_SEV_CRITICAL) | \
+ FIELD_PREP(AMDXDNA_ERR_MOD_MASK, err_mod) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_CLASS_MASK, AMDXDNA_ERR_CLASS_AIE))
+
+#define AMDXDNA_EXTRA_ERR_COL_MASK GENMASK_U64(7, 0)
+#define AMDXDNA_EXTRA_ERR_ROW_MASK GENMASK_U64(15, 8)
+
+#define AMDXDNA_EXTRA_ERR_ENCODE(row, col) \
+ (FIELD_PREP(AMDXDNA_EXTRA_ERR_COL_MASK, col) | \
+ FIELD_PREP(AMDXDNA_EXTRA_ERR_ROW_MASK, row))
+
+#endif /* _AMDXDNA_ERROR_H_ */
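For example (illustrative values), an AIE DMA error raised by a memory-module tile at row 2, column 5 would be encoded with the macros above as:

        u64 err   = AMDXDNA_ERROR_ENCODE(AMDXDNA_ERROR_NUM_AIE_DMA,
                                         AMDXDNA_ERROR_MODULE_AIE_MEMORY);
        u64 extra = AMDXDNA_EXTRA_ERR_ENCODE(2, 5);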
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
new file mode 100644
index 000000000000..dfa916eeb2d9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -0,0 +1,972 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
+#include <linux/iosys-map.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+static int
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_client *client = abo->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_mem *mem = &abo->mem;
+ struct amdxdna_gem_obj *heap;
+ u64 offset;
+ u32 align;
+ int ret;
+
+ mutex_lock(&client->mm_lock);
+
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (mem->size == 0 || mem->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+ mem->size, heap->mem.size);
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
+ ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
+ mem->size, align,
+ 0, DRM_MM_INSERT_BEST);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ goto unlock_out;
+ }
+
+ mem->dev_addr = abo->mm_node.start;
+ offset = mem->dev_addr - heap->mem.dev_addr;
+ mem->userptr = heap->mem.userptr + offset;
+ mem->kva = heap->mem.kva + offset;
+
+ drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+ mutex_unlock(&client->mm_lock);
+
+ return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_gem_obj *heap;
+
+ mutex_lock(&abo->client->mm_lock);
+
+ drm_mm_remove_node(&abo->mm_node);
+
+ heap = abo->client->dev_heap;
+ drm_gem_object_put(to_gobj(heap));
+
+ mutex_unlock(&abo->client->mm_lock);
+}
+
+static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+ struct amdxdna_gem_obj *abo = mapp->abo;
+ struct amdxdna_dev *xdna;
+
+ xdna = to_xdna_dev(to_gobj(abo)->dev);
+ XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+ mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mapp->invalid = true;
+ mmu_interval_set_seq(&mapp->notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+
+ xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ down_write(&xdna->notifier_lock);
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ up_write(&xdna->notifier_lock);
+ }
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
+ .invalidate = amdxdna_hmm_invalidate,
+};
+
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+
+ down_read(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (!vma || mapp->vma == vma) {
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ if (vma)
+ break;
+ }
+ }
+ up_read(&xdna->notifier_lock);
+}
+
+static void amdxdna_umap_release(struct kref *ref)
+{
+ struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+ struct vm_area_struct *vma = mapp->vma;
+ struct amdxdna_dev *xdna;
+
+ mmu_interval_notifier_remove(&mapp->notifier);
+ if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+ xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+ down_write(&xdna->notifier_lock);
+ list_del(&mapp->node);
+ up_write(&xdna->notifier_lock);
+
+ kvfree(mapp->range.hmm_pfns);
+ kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+ kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+ struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+ hmm_unreg_work);
+
+ amdxdna_umap_put(mapp);
+}
+
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long len = vma->vm_end - vma->vm_start;
+ unsigned long addr = vma->vm_start;
+ struct amdxdna_umap *mapp;
+ u32 nr_pages;
+ int ret;
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return 0;
+
+ mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+ if (!mapp)
+ return -ENOMEM;
+
+ nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+ mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+ GFP_KERNEL);
+ if (!mapp->range.hmm_pfns) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
+
+ ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
+ current->mm,
+ addr,
+ len,
+ &amdxdna_hmm_ops);
+ if (ret) {
+ XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
+ goto free_pfns;
+ }
+
+ mapp->range.notifier = &mapp->notifier;
+ mapp->range.start = vma->vm_start;
+ mapp->range.end = vma->vm_end;
+ mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+ mapp->vma = vma;
+ mapp->abo = abo;
+ kref_init(&mapp->refcnt);
+
+ if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+ abo->mem.userptr = addr;
+ INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+ if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_set_unevictable(vma->vm_file->f_mapping);
+
+ down_write(&xdna->notifier_lock);
+ list_add_tail(&mapp->node, &abo->mem.umap_list);
+ up_write(&xdna->notifier_lock);
+
+ return 0;
+
+free_pfns:
+ kvfree(mapp->range.hmm_pfns);
+free_map:
+ kfree(mapp);
+ return ret;
+}
+
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ amdxdna_gem_heap_free(abo);
+ drm_gem_object_release(gobj);
+ amdxdna_gem_destroy_obj(abo);
+}
+
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long num_pages = vma_pages(vma);
+ unsigned long offset = 0;
+ int ret;
+
+ if (!is_import_bo(abo)) {
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+ return ret;
+ }
+
+ /* The buffer is based on memory pages. Fix the flag. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages %d", ret);
+ vma->vm_ops->close(vma);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+ ret = dma_buf_mmap(abo->dma_buf, vma, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+ return ret;
+ }
+
+ do {
+ vm_fault_t fault_ret;
+
+ fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+ FAULT_FLAG_WRITE, NULL);
+ if (fault_ret & VM_FAULT_ERROR) {
+ vma->vm_ops->close(vma);
+ XDNA_ERR(xdna, "Fault in page failed");
+ return -EFAULT;
+ }
+
+ offset += PAGE_SIZE;
+ } while (--num_pages);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired. */
+ drm_gem_object_put(to_gobj(abo));
+
+ return 0;
+}
+
+static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ int ret;
+
+ ret = amdxdna_hmm_register(abo, vma);
+ if (ret)
+ return ret;
+
+ ret = amdxdna_insert_pages(abo, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+ goto hmm_unreg;
+ }
+
+ XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+ drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+ vma->vm_start, gobj->size);
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo, vma);
+ return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages = vma_pages(vma);
+ int ret;
+
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+ vma->vm_private_data = gobj;
+
+ drm_gem_object_get(gobj);
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret)
+ goto put_obj;
+
+ /* The buffer is backed by memory pages. Fix up the VMA flags. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret)
+ goto close_vma;
+
+ return 0;
+
+close_vma:
+ vma->vm_ops->close(vma);
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = amdxdna_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
+{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ int ret;
+
+ if (is_import_bo(abo))
+ ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
+ else
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+
+ *vaddr = map.vaddr;
+ return ret;
+}
+
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
+{
+ struct iosys_map map;
+
+ if (!abo->mem.kva)
+ return;
+
+ iosys_map_set_vaddr(&map, abo->mem.kva);
+
+ if (is_import_bo(abo))
+ dma_buf_vunmap_unlocked(abo->dma_buf, &map);
+ else
+ drm_gem_vunmap(to_gobj(abo), &map);
+}
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (abo->dma_buf) {
+ get_dma_buf(abo->dma_buf);
+ return abo->dma_buf;
+ }
+
+ exp_info.ops = &amdxdna_dmabuf_ops;
+ exp_info.size = gobj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gobj;
+ exp_info.resv = gobj->resv;
+
+ return drm_gem_dmabuf_export(gobj->dev, &exp_info);
+}
+
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
+{
+ dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(abo->dma_buf, abo->attach);
+ dma_buf_put(abo->dma_buf);
+ drm_gem_object_release(to_gobj(abo));
+ kfree(abo);
+}
+
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+ amdxdna_hmm_unregister(abo, NULL);
+ flush_workqueue(xdna->notifier_wq);
+
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ amdxdna_gem_obj_vunmap(abo);
+ mutex_destroy(&abo->lock);
+
+ if (is_import_bo(abo)) {
+ amdxdna_imported_obj_free(abo);
+ return;
+ }
+
+ drm_gem_shmem_free(&abo->base);
+}
+
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_dev_obj_free,
+};
+
+static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
+ .free = amdxdna_gem_obj_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = amdxdna_gem_obj_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+ .export = amdxdna_gem_prime_export,
+};
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = kzalloc(sizeof(*abo), GFP_KERNEL);
+ if (!abo)
+ return ERR_PTR(-ENOMEM);
+
+ abo->pinned = false;
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+ mutex_init(&abo->lock);
+
+ abo->mem.userptr = AMDXDNA_INVALID_ADDR;
+ abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
+ abo->mem.size = size;
+ INIT_LIST_HEAD(&abo->mem.umap_list);
+
+ return abo;
+}
+
+/* For drm_driver->gem_create_object callback */
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_obj(dev, size);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;
+
+ return to_gobj(abo);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ enum amdxdna_ubuf_flag flags = 0;
+ struct amdxdna_drm_va_tbl va_tbl;
+ struct drm_gem_object *gobj;
+ struct dma_buf *dma_buf;
+
+ if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+ XDNA_DBG(xdna, "Access va table failed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (va_tbl.num_entries) {
+ if (args->type == AMDXDNA_BO_CMD)
+ flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+ dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+ u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+ } else {
+ dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+ }
+
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gobj = amdxdna_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(gobj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(gobj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args)
+{
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+
+ if (args->vaddr)
+ return amdxdna_gem_create_ubuf_object(dev, args);
+
+ return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ struct sg_table *sgt;
+ int ret;
+
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto put_buf;
+ }
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(gobj)) {
+ ret = PTR_ERR(gobj);
+ goto fail_unmap;
+ }
+
+ abo = to_xdna_obj(gobj);
+ abo->attach = attach;
+ abo->dma_buf = dma_buf;
+
+ return gobj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+put_buf:
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_alloc_shmem(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ abo->client = client;
+ abo->type = AMDXDNA_BO_SHMEM;
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_dev_heap(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > xdna->dev_info->dev_mem_size) {
+ XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
+ args->size, xdna->dev_info->dev_mem_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&client->mm_lock);
+ if (client->dev_heap) {
+ XDNA_DBG(client->xdna, "dev heap is already created");
+ ret = -EBUSY;
+ goto mm_unlock;
+ }
+
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto mm_unlock;
+ }
+
+ abo->type = AMDXDNA_BO_DEV_HEAP;
+ abo->client = client;
+ abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
+ drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+ goto release_obj;
+ }
+
+ client->dev_heap = abo;
+ drm_gem_object_get(to_gobj(abo));
+ mutex_unlock(&client->mm_lock);
+
+ return abo;
+
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
+ if (IS_ERR(abo))
+ return abo;
+
+ to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
+ abo->type = AMDXDNA_BO_DEV;
+ abo->client = client;
+
+ ret = amdxdna_gem_heap_alloc(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ amdxdna_gem_destroy_obj(abo);
+ return ERR_PTR(ret);
+ }
+
+ drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+ XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+ goto release_obj;
+ }
+
+ return abo;
+
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
+ return ERR_PTR(ret);
+}
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_create_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->flags)
+ return -EINVAL;
+
+ XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
+ args->type, args->vaddr, args->size, args->flags);
+ switch (args->type) {
+ case AMDXDNA_BO_SHMEM:
+ abo = amdxdna_drm_alloc_shmem(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV_HEAP:
+ abo = amdxdna_drm_create_dev_heap(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV:
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
+ break;
+ case AMDXDNA_BO_CMD:
+ abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (IS_ERR(abo))
+ return PTR_ERR(abo);
+
+ /* ready to publish object to userspace */
+ ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
+ if (ret) {
+ XDNA_ERR(xdna, "Create handle failed");
+ goto put_obj;
+ }
+
+ XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
+ args->handle, args->type, abo->mem.userptr,
+ abo->mem.dev_addr, abo->mem.size);
+put_obj:
+ /* Drop the object reference; the handle holds one now. */
+ drm_gem_object_put(to_gobj(abo));
+ return ret;
+}
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ int ret;
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
+ if (is_import_bo(abo))
+ return 0;
+
+ ret = drm_gem_shmem_pin(&abo->base);
+
+ XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
+ return ret;
+}
+
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
+{
+ int ret;
+
+ mutex_lock(&abo->lock);
+ ret = amdxdna_gem_pin_nolock(abo);
+ mutex_unlock(&abo->lock);
+
+ return ret;
+}
+
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
+{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
+ if (is_import_bo(abo))
+ return;
+
+ mutex_lock(&abo->lock);
+ drm_gem_shmem_unpin(&abo->base);
+ mutex_unlock(&abo->lock);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
+ return NULL;
+ }
+
+ abo = to_xdna_obj(gobj);
+ if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
+ return abo;
+
+ drm_gem_object_put(gobj);
+ return NULL;
+}
+
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_drm_get_bo_info *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret = 0;
+
+ if (args->ext || args->ext_flags || args->pad)
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
+ return -ENOENT;
+ }
+
+ abo = to_xdna_obj(gobj);
+ args->vaddr = abo->mem.userptr;
+ args->xdna_addr = abo->mem.dev_addr;
+
+ if (abo->type != AMDXDNA_BO_DEV)
+ args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
+ else
+ args->map_offset = AMDXDNA_INVALID_ADDR;
+
+ XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
+ args->handle, args->map_offset, args->vaddr, args->xdna_addr);
+
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+/*
+ * The sync bo ioctl makes sure the CPU cache is in sync with memory.
+ * This is required because the NPU is not a cache-coherent device. CPU
+ * cache flushing/invalidation is expensive, so it is best handled
+ * outside of the command submission path. This ioctl allows explicit
+ * cache flushing/invalidation outside of the critical path.
+ */
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_sync_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -ENOENT;
+ }
+ abo = to_xdna_obj(gobj);
+
+ ret = amdxdna_gem_pin(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
+ goto put_obj;
+ }
+
+ if (is_import_bo(abo))
+ drm_clflush_sg(abo->base.sgt);
+ else if (abo->mem.kva)
+ drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+ else if (abo->base.pages)
+ drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+ else
+ drm_WARN(&xdna->ddev, 1, "Can not get flush memory");
+
+ amdxdna_gem_unpin(abo);
+
+ XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
+ args->handle, args->offset, args->size);
+
+ if (args->direction == SYNC_DIRECT_FROM_DEVICE)
+ ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle);
+
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
new file mode 100644
index 000000000000..f79fc7f3c93b
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_GEM_H_
+#define _AMDXDNA_GEM_H_
+
+#include <linux/hmm.h>
+#include "amdxdna_pci_drv.h"
+
+struct amdxdna_umap {
+ struct vm_area_struct *vma;
+ struct mmu_interval_notifier notifier;
+ struct hmm_range range;
+ struct work_struct hmm_unreg_work;
+ struct amdxdna_gem_obj *abo;
+ struct list_head node;
+ struct kref refcnt;
+ bool invalid;
+ bool unmapped;
+};
+
+struct amdxdna_mem {
+ u64 userptr;
+ void *kva;
+ u64 dev_addr;
+ size_t size;
+ struct page **pages;
+ u32 nr_pages;
+ struct list_head umap_list;
+ bool map_invalid;
+};
+
+struct amdxdna_gem_obj {
+ struct drm_gem_shmem_object base;
+ struct amdxdna_client *client;
+ u8 type;
+ bool pinned;
+ struct mutex lock; /* Protects: pinned */
+ struct amdxdna_mem mem;
+
+ /* The members below are initialized only when needed */
+ struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
+ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
+ u32 assigned_hwctx;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+};
+
+#define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
+
+static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
+{
+ return container_of(gobj, struct amdxdna_gem_obj, base.base);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type);
+static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
+{
+ drm_gem_object_put(to_gobj(abo));
+}
+
+static inline u64 amdxdna_dev_bo_offset(struct amdxdna_gem_obj *abo)
+{
+ return abo->mem.dev_addr - abo->client->dev_heap->mem.dev_addr;
+}
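+
+/*
+ * Example (illustrative addresses): with the device heap at dev_addr
+ * 0x4000000 and a BO carved out of it at 0x4010000, the returned
+ * offset into the heap buffer is 0x10000.
+ */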
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp);
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo);
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_GEM_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
new file mode 100644
index 000000000000..858df97cd3fb
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_mailbox.h"
+
+#define MB_ERR(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_DBG(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_WARN_ONCE(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
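+
+/*
+ * The _chann temporaries above keep a side-effecting 'chann' argument
+ * from being evaluated twice per log statement.
+ */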
+
+#define MAGIC_VAL 0x1D000000U
+#define MAGIC_VAL_MASK 0xFF000000
+#define MAX_MSG_ID_ENTRIES 256
+#define MSG_RX_TIMER 200 /* milliseconds */
+#define MAILBOX_NAME "xdna_mailbox"
+
+enum channel_res_type {
+ CHAN_RES_X2I,
+ CHAN_RES_I2X,
+ CHAN_RES_NUM
+};
+
+struct mailbox {
+ struct device *dev;
+ struct xdna_mailbox_res res;
+};
+
+struct mailbox_channel {
+ struct mailbox *mb;
+ struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
+ int msix_irq;
+ u32 iohub_int_addr;
+ struct xarray chan_xa;
+ u32 next_msgid;
+ u32 x2i_tail;
+
+ /* Received msg related fields */
+ struct workqueue_struct *work_q;
+ struct work_struct rx_work;
+ u32 i2x_head;
+ bool bad_state;
+};
+
+#define MSG_BODY_SZ GENMASK(10, 0)
+#define MSG_PROTO_VER GENMASK(23, 16)
+struct xdna_msg_header {
+ __u32 total_size;
+ __u32 sz_ver;
+ __u32 id;
+ __u32 opcode;
+} __packed;
+
+static_assert(sizeof(struct xdna_msg_header) == 16);
+
+struct mailbox_pkg {
+ struct xdna_msg_header header;
+ __u32 payload[];
+};
+
+/* The protocol version. */
+#define MSG_PROTOCOL_VERSION 0x1
+/* The tombstone value. */
+#define TOMBSTONE 0xDEADFACE
+
+struct mailbox_msg {
+ void *handle;
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
+ size_t pkg_size; /* package size in bytes */
+ struct mailbox_pkg pkg;
+};
+
+static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ writel(data, ringbuf_addr);
+}
+
+static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ return readl(ringbuf_addr);
+}
+
+static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+ int ret, value;
+
+ /* Poll till value is not zero */
+ ret = readx_poll_timeout(readl, ringbuf_addr, value,
+ value, 1 /* us */, 100);
+ if (ret < 0)
+ return ret;
+
+ *val = value;
+ return 0;
+}
+
+static inline void
+mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
+ mb_chann->i2x_head = headptr_val;
+}
+
+static inline void
+mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
+ mb_chann->x2i_tail = tailptr_val;
+}
+
+static inline u32
+mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
+}
+
+static inline u32
+mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
+}
+
+static inline u32
+mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mb_chann->res[type].rb_size;
+}
+
+static inline int mailbox_validate_msgid(int msg_id)
+{
+ return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
+}
+
+static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ u32 msg_id;
+ int ret;
+
+ ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
+ XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
+ &mb_chann->next_msgid, GFP_NOWAIT);
+ if (ret < 0)
+ return ret;
+
+ /* Add MAGIC_VAL to the higher bits. */
+ msg_id |= MAGIC_VAL;
+ return msg_id;
+}
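+
+/*
+ * Example of the ID encoding above (illustrative index): an xarray
+ * index of 0x2a yields the wire ID 0x1d00002a. mailbox_validate_msgid()
+ * checks the top byte against MAGIC_VAL, and mailbox_release_msgid()
+ * masks it off again before erasing the xarray entry.
+ */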
+
+static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
+{
+ msg_id &= ~MAGIC_VAL_MASK;
+ xa_erase_irq(&mb_chann->chan_xa, msg_id);
+}
+
+static void mailbox_release_msg(struct mailbox_channel *mb_chann,
+ struct mailbox_msg *mb_msg)
+{
+ MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
+ mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
+ if (mb_msg->notify_cb)
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ kfree(mb_msg);
+}
+
+static int
+mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ void __iomem *write_addr;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ u32 tmp_tail;
+
+ head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
+ tail = mb_chann->x2i_tail;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
+ start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
+ tmp_tail = tail + mb_msg->pkg_size;
+
+ if (tail < head && tmp_tail >= head)
+ goto no_space;
+
+ if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
+ mb_msg->pkg_size >= head))
+ goto no_space;
+
+ if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ writel(TOMBSTONE, write_addr);
+
+ /* tombstone is set. Write from the start of the ringbuf */
+ tail = 0;
+ }
+
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
+ mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);
+
+ trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
+ mb_msg->pkg.header.opcode,
+ mb_msg->pkg.header.id);
+
+ return 0;
+
+no_space:
+ return -ENOSPC;
+}
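+
+/*
+ * Worked example of the wrap handling above (illustrative numbers):
+ * with a 0x400-byte ring, head = 0x100, tail = 0x3c0 and a 0x80-byte
+ * package, tmp_tail (0x440) would run past ringbuf_size - sizeof(u32),
+ * so TOMBSTONE is written at 0x3c0 and the package lands at offset 0.
+ * Had head been 0x80 or less, the wrapped package would have caught up
+ * with unread data and -ENOSPC would be returned instead.
+ */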
+
+static int
+mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
+ void __iomem *data)
+{
+ struct mailbox_msg *mb_msg;
+ int msg_id;
+ int ret = 0;
+
+ msg_id = header->id;
+ if (!mailbox_validate_msgid(msg_id)) {
+ MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ msg_id &= ~MAGIC_VAL_MASK;
+ mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
+ if (!mb_msg) {
+ MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+ if (mb_msg->notify_cb) {
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+ }
+
+ kfree(mb_msg);
+ return ret;
+}
+
+static int mailbox_get_msg(struct mailbox_channel *mb_chann)
+{
+ struct xdna_msg_header header;
+ void __iomem *read_addr;
+ u32 msg_size, rest;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ int ret;
+
+ if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
+ return -EINVAL;
+ head = mb_chann->i2x_head;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
+ start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;
+
+ if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
+ return -EINVAL;
+ }
+
+ /* ringbuf empty */
+ if (head == tail)
+ return -ENOENT;
+
+ if (head == ringbuf_size)
+ head = 0;
+
+ /* Peek size of the message or TOMBSTONE */
+ read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
+ header.total_size = readl(read_addr);
+ /* size is TOMBSTONE, set next read from 0 */
+ if (header.total_size == TOMBSTONE) {
+ if (head < tail) {
+ MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
+ head, tail);
+ return -EINVAL;
+ }
+ mailbox_set_headptr(mb_chann, 0);
+ return 0;
+ }
+
+ if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
+ return -EINVAL;
+ }
+ msg_size = sizeof(header) + header.total_size;
+
+ if (msg_size > ringbuf_size - head || msg_size > tail - head) {
+ MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
+ msg_size, tail, head);
+ return -EINVAL;
+ }
+
+ rest = sizeof(header) - sizeof(u32);
+ read_addr += sizeof(u32);
+ memcpy_fromio((u32 *)&header + 1, read_addr, rest);
+ read_addr += rest;
+
+ ret = mailbox_get_resp(mb_chann, &header, read_addr);
+
+ mailbox_set_headptr(mb_chann, head + msg_size);
+ /* After updating head, it can equal ringbuf_size. This is expected. */
+ trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
+ header.opcode, header.id);
+
+ return ret;
+}
+
+static irqreturn_t mailbox_irq_handler(int irq, void *p)
+{
+ struct mailbox_channel *mb_chann = p;
+
+ trace_mbox_irq_handle(MAILBOX_NAME, irq);
+ /* Schedule a rx_work to call the callback functions */
+ queue_work(mb_chann->work_q, &mb_chann->rx_work);
+
+ return IRQ_HANDLED;
+}
+
+static void mailbox_rx_worker(struct work_struct *rx_work)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state, work aborted");
+ return;
+ }
+
+again:
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
+ while (1) {
+ /*
+ * If the return value is 0, keep consuming the next message
+ * until there are no more messages or an error happens.
+ */
+ ret = mailbox_get_msg(mb_chann);
+ if (ret == -ENOENT)
+ break;
+
+ /* Other error means device doesn't look good, disable irq. */
+ if (unlikely(ret)) {
+ MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
+ WRITE_ONCE(mb_chann->bad_state, true);
+ return;
+ }
+ }
+
+ /*
+ * The hardware will not generate an interrupt if the firmware creates
+ * a new response right after the driver clears the interrupt register.
+ * Check the interrupt register to make sure there is no new response
+ * before exiting.
+ */
+ if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
+ goto again;
+}
+
+int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout)
+{
+ struct xdna_msg_header *header;
+ struct mailbox_msg *mb_msg;
+ size_t pkg_size;
+ int ret;
+
+ pkg_size = sizeof(*header) + msg->send_size;
+ if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
+ MB_ERR(mb_chann, "Message size larger than ringbuf size");
+ return -EINVAL;
+ }
+
+ if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
+ MB_ERR(mb_chann, "Message must be 4 bytes align");
+ return -EINVAL;
+ }
+
+ /* The first word in the payload can NOT be TOMBSTONE */
+ if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
+ MB_ERR(mb_chann, "Tombstone in data");
+ return -EINVAL;
+ }
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state");
+ return -EPIPE;
+ }
+
+ mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
+ if (!mb_msg)
+ return -ENOMEM;
+
+ mb_msg->handle = msg->handle;
+ mb_msg->notify_cb = msg->notify_cb;
+ mb_msg->pkg_size = pkg_size;
+
+ header = &mb_msg->pkg.header;
+ /*
+ * Hardware uses total_size and size to split huge messages.
+ * We do not support that here, thus the two values are the same.
+ */
+ header->total_size = msg->send_size;
+ header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
+ FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
+ header->opcode = msg->opcode;
+ memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);
+
+ ret = mailbox_acquire_msgid(mb_chann, mb_msg);
+ if (unlikely(ret < 0)) {
+ MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
+ goto msg_id_failed;
+ }
+ header->id = ret;
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+
+ ret = mailbox_send_msg(mb_chann, mb_msg);
+ if (ret) {
+ MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
+ goto release_id;
+ }
+
+ return 0;
+
+release_id:
+ mailbox_release_msgid(mb_chann, header->id);
+msg_id_failed:
+ kfree(mb_msg);
+ return ret;
+}
+
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mb,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 iohub_int_addr,
+ int mb_irq)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
+ pr_err("Ring buf size must be power of 2");
+ return NULL;
+ }
+
+ mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
+ if (!mb_chann)
+ return NULL;
+
+ mb_chann->mb = mb;
+ mb_chann->msix_irq = mb_irq;
+ mb_chann->iohub_int_addr = iohub_int_addr;
+ memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
+ memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));
+
+ xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
+ mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
+
+ INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
+ mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
+ if (!mb_chann->work_q) {
+ MB_ERR(mb_chann, "Create workqueue failed");
+ goto free_and_out;
+ }
+
+ /* Everything looks good. Time to enable the irq handler */
+ ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
+ if (ret) {
+ MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
+ goto destroy_wq;
+ }
+
+ mb_chann->bad_state = false;
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
+ MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
+ return mb_chann;
+
+destroy_wq:
+ destroy_workqueue(mb_chann->work_q);
+free_and_out:
+ kfree(mb_chann);
+ return NULL;
+}
+
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
+{
+ struct mailbox_msg *mb_msg;
+ unsigned long msg_id;
+
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+ free_irq(mb_chann->msix_irq, mb_chann);
+ destroy_workqueue(mb_chann->work_q);
+ /* We can clean up and release resources */
+
+ xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
+ mailbox_release_msg(mb_chann, mb_msg);
+
+ xa_destroy(&mb_chann->chan_xa);
+
+ MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
+ kfree(mb_chann);
+ return 0;
+}
+
+void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
+{
+ /* Disable the irq and wait. This might sleep. */
+ disable_irq(mb_chann->msix_irq);
+
+ /* Cancel RX work and wait for it to finish */
+ cancel_work_sync(&mb_chann->rx_work);
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+}
+
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res)
+{
+ struct mailbox *mb;
+
+ mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return NULL;
+ mb->dev = ddev->dev;
+
+ /* mailbox and ring buf base and size information */
+ memcpy(&mb->res, res, sizeof(*res));
+
+ return mb;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
new file mode 100644
index 000000000000..ea367f2fb738
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MAILBOX_H_
+#define _AIE2_MAILBOX_H_
+
+struct mailbox;
+struct mailbox_channel;
+
+/*
+ * xdna_mailbox_msg - message struct
+ *
+ * @opcode: opcode for firmware
+ * @handle: handle used for the notify callback
+ * @notify_cb: callback function to notify the sender when there is a response
+ * @send_data: pointer to the data to send
+ * @send_size: size of the data to send
+ *
+ * The mailbox will split the sent data into multiple firmware messages if
+ * the data is too big. This is transparent to the sender; the sender
+ * receives a single notification.
+ */
+struct xdna_mailbox_msg {
+ u32 opcode;
+ void *handle;
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
+ u8 *send_data;
+ size_t send_size;
+};
+
+/*
+ * xdna_mailbox_res - mailbox hardware resource
+ *
+ * @ringbuf_base: ring buffer base address
+ * @ringbuf_size: ring buffer size
+ * @mbox_base: mailbox base address
+ * @mbox_size: mailbox size
+ * @name: mailbox name
+ */
+struct xdna_mailbox_res {
+ void __iomem *ringbuf_base;
+ size_t ringbuf_size;
+ void __iomem *mbox_base;
+ size_t mbox_size;
+ const char *name;
+};
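+
+/*
+ * A minimal sketch of filling this in from a device driver (the
+ * sram_base/mbox_base cookies and the BAR indexes are illustrative,
+ * not taken from a real device table):
+ *
+ *	struct xdna_mailbox_res res = {
+ *		.ringbuf_base = sram_base,
+ *		.ringbuf_size = pci_resource_len(pdev, sram_bar),
+ *		.mbox_base = mbox_base,
+ *		.mbox_size = pci_resource_len(pdev, mbox_bar),
+ *		.name = "xdna_mbox",
+ *	};
+ */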
+
+/*
+ * xdna_mailbox_chann_res - resources
+ *
+ * @rb_start_addr: ring buffer start address
+ * @rb_size: ring buffer size
+ * @mb_head_ptr_reg: mailbox head pointer register
+ * @mb_tail_ptr_reg: mailbox tail pointer register
+ */
+struct xdna_mailbox_chann_res {
+ u32 rb_start_addr;
+ u32 rb_size;
+ u32 mb_head_ptr_reg;
+ u32 mb_tail_ptr_reg;
+};
+
+/*
+ * xdnam_mailbox_create() -- create mailbox subsystem and initialize
+ *
+ * @ddev: device pointer
+ * @res: SRAM and mailbox resources
+ *
+ * Return: On success, return a handle of the mailbox subsystem.
+ * Otherwise, return a NULL pointer.
+ */
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res);
+
+/*
+ * xdna_mailbox_create_channel() -- Create a mailbox channel instance
+ *
+ * @mailbox: the handle returned from xdnam_mailbox_create()
+ * @x2i: host to firmware mailbox resources
+ * @i2x: firmware to host mailbox resources
+ * @xdna_mailbox_intr_reg: register addr of MSI-X interrupt
+ * @mb_irq: Linux IRQ number associated with mailbox MSI-X interrupt vector index
+ *
+ * Return: On success, return a handle of the mailbox channel. Otherwise, return NULL.
+ */
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mailbox,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 xdna_mailbox_intr_reg,
+ int mb_irq);
+
+/*
+ * xdna_mailbox_destroy_channel() -- destroy mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ *
+ * Return: 0 on success; otherwise, an error code.
+ */
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_stop_channel() -- stop mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ */
+void xdna_mailbox_stop_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_send_msg() -- Send a message
+ *
+ * @mailbox_chann: Mailbox channel handle
+ * @msg: message struct for message information
+ * @tx_timeout: the timeout value for sending the message in ms.
+ *
+ * Return: 0 on success; otherwise, an error code.
+ */
+int xdna_mailbox_send_msg(struct mailbox_channel *mailbox_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout);
+
+#endif /* _AIE2_MAILBOX_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
new file mode 100644
index 000000000000..6d0c24513476
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/completion.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size)
+{
+ struct xdna_notify *cb_arg = handle;
+ int ret;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(cb_arg->size != size)) {
+ cb_arg->error = -EINVAL;
+ goto out;
+ }
+
+ memcpy_fromio(cb_arg->data, data, cb_arg->size);
+ print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
+ 16, 4, cb_arg->data, cb_arg->size, true);
+out:
+ ret = cb_arg->error;
+ complete(&cb_arg->comp);
+ return ret;
+}
+
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg)
+{
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ ret = xdna_mailbox_send_msg(chann, msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&hdl->comp,
+ msecs_to_jiffies(RX_TIMEOUT));
+ if (!ret) {
+ XDNA_ERR(xdna, "Wait for completion timeout");
+ return -ETIME;
+ }
+
+ return hdl->error;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
new file mode 100644
index 000000000000..556c712cad0a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_MAILBOX_HELPER_H
+#define _AMDXDNA_MAILBOX_HELPER_H
+
+#define TX_TIMEOUT 2000 /* milliseconds */
+#define RX_TIMEOUT 5000 /* milliseconds */
+
+struct amdxdna_dev;
+
+struct xdna_notify {
+ struct completion comp;
+ u32 *data;
+ size_t size;
+ int error;
+ u32 *status;
+};
+
+#define DECLARE_XDNA_MSG_COMMON(name, op, s) \
+ struct name##_req req = { 0 }; \
+ struct name##_resp resp = { .status = s }; \
+ struct xdna_notify hdl = { \
+ .error = 0, \
+ .data = (u32 *)&resp, \
+ .size = sizeof(resp), \
+ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ .status = (u32 *)&resp.status, \
+ }; \
+ struct xdna_mailbox_msg msg = { \
+ .send_data = (u8 *)&req, \
+ .send_size = sizeof(req), \
+ .handle = &hdl, \
+ .opcode = op, \
+ .notify_cb = xdna_msg_cb, \
+ }
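+
+/*
+ * Illustrative use of the declarations above ("suspend" and
+ * MSG_OP_SUSPEND/AIE2_STATUS_MAX are placeholder names, not a real
+ * wire protocol):
+ *
+ *	DECLARE_XDNA_MSG_COMMON(suspend, MSG_OP_SUSPEND, AIE2_STATUS_MAX);
+ *
+ *	ret = xdna_send_msg_wait(xdna, chann, &msg);
+ *
+ * This puts req/resp/hdl/msg on the stack and blocks for up to
+ * RX_TIMEOUT ms until xdna_msg_cb() copies the firmware response
+ * into resp and completes hdl.comp.
+ */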
+
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size);
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg);
+
+#endif /* _AMDXDNA_MAILBOX_HELPER_H */
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
new file mode 100644
index 000000000000..1973ab67721b
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
+
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
+/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ * 0.2: Support getting the last hardware error
+ * 0.3: Support firmware debug buffer
+ * 0.4: Support getting resource information
+ * 0.5: Support getting telemetry data
+ * 0.6: Support preemption
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 6
+
+/*
+ * Bind the driver based on the (vendor_id, device_id) pair and later use the
+ * (device_id, rev_id) pair as a key to select the devices. Devices with the
+ * same device_id have very similar interfaces to the host driver.
+ */
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static const struct amdxdna_device_id amdxdna_ids[] = {
+ { 0x1502, 0x0, &dev_npu1_info },
+ { 0x17f0, 0x0, &dev_npu2_info },
+ { 0x17f0, 0x10, &dev_npu4_info },
+ { 0x17f0, 0x11, &dev_npu5_info },
+ { 0x17f0, 0x20, &dev_npu6_info },
+ {0}
+};
+
+static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_client *client;
+ int ret;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->pid = pid_nr(rcu_access_pointer(filp->pid));
+ client->xdna = xdna;
+
+ client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
+ if (IS_ERR(client->sva)) {
+ ret = PTR_ERR(client->sva);
+ XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
+ goto failed;
+ }
+ client->pasid = iommu_sva_get_pasid(client->sva);
+ if (client->pasid == IOMMU_PASID_INVALID) {
+ XDNA_ERR(xdna, "SVA get pasid failed");
+ ret = -ENODEV;
+ goto unbind_sva;
+ }
+ init_srcu_struct(&client->hwctx_srcu);
+ xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
+ mutex_init(&client->mm_lock);
+
+ mutex_lock(&xdna->dev_lock);
+ list_add_tail(&client->node, &xdna->client_list);
+ mutex_unlock(&xdna->dev_lock);
+
+ filp->driver_priv = client;
+ client->filp = filp;
+
+ XDNA_DBG(xdna, "pid %d opened", client->pid);
+ return 0;
+
+unbind_sva:
+ iommu_sva_unbind_device(client->sva);
+failed:
+ kfree(client);
+
+ return ret;
+}
+
+static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+
+ XDNA_DBG(xdna, "closing pid %d", client->pid);
+
+ xa_destroy(&client->hwctx_xa);
+ cleanup_srcu_struct(&client->hwctx_srcu);
+ mutex_destroy(&client->mm_lock);
+ if (client->dev_heap)
+ drm_gem_object_put(to_gobj(client->dev_heap));
+
+ iommu_sva_unbind_device(client->sva);
+
+ XDNA_DBG(xdna, "pid %d closed", client->pid);
+ kfree(client);
+}
+
+static int amdxdna_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *filp = f->private_data;
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = client->xdna;
+ int idx;
+
+ XDNA_DBG(xdna, "PID %d flushing...", client->pid);
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return 0;
+
+ mutex_lock(&xdna->dev_lock);
+ list_del_init(&client->node);
+ amdxdna_hwctx_remove_all(client);
+ mutex_unlock(&xdna->dev_lock);
+
+ drm_dev_exit(idx);
+ return 0;
+}
+
+static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_info *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->get_aie_info)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->get_aie_info(client, args);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
+
+static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_set_state *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->set_aie_state)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->set_aie_state(client, args);
+ mutex_unlock(&xdna->dev_lock);
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
+ /* Context */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
+ /* BO */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
+ /* Execution */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
+ /* AIE hardware */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
+};
+
+static const struct file_operations amdxdna_fops = {
+ .owner = THIS_MODULE,
+ .open = accel_open,
+ .release = drm_release,
+ .flush = amdxdna_flush,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+};
+
+const struct drm_driver amdxdna_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .fops = &amdxdna_fops,
+ .name = "amdxdna_accel_driver",
+ .desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
+ .open = amdxdna_drm_open,
+ .postclose = amdxdna_drm_close,
+ .ioctls = amdxdna_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
+
+ .gem_create_object = amdxdna_gem_create_object_cb,
+ .gem_prime_import = amdxdna_gem_prime_import,
+};
+
+static const struct amdxdna_dev_info *
+amdxdna_get_dev_info(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
+ if (pdev->device == amdxdna_ids[i].device &&
+ pdev->revision == amdxdna_ids[i].revision)
+ return amdxdna_ids[i].dev_info;
+ }
+ return NULL;
+}
+
+static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
+ if (IS_ERR(xdna))
+ return PTR_ERR(xdna);
+
+ xdna->dev_info = amdxdna_get_dev_info(pdev);
+ if (!xdna->dev_info)
+ return -ENODEV;
+
+ drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
+ init_rwsem(&xdna->notifier_lock);
+ INIT_LIST_HEAD(&xdna->client_list);
+ pci_set_drvdata(pdev, xdna);
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xdna->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
+ xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+ if (!xdna->notifier_wq)
+ return -ENOMEM;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->init(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ if (ret) {
+ XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
+ goto destroy_notifier_wq;
+ }
+
+ ret = amdxdna_sysfs_init(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
+ goto failed_dev_fini;
+ }
+
+ ret = drm_dev_register(&xdna->ddev, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
+ goto failed_sysfs_fini;
+ }
+
+ return 0;
+
+failed_sysfs_fini:
+ amdxdna_sysfs_fini(xdna);
+failed_dev_fini:
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+ destroy_workqueue(xdna->notifier_wq);
+ return ret;
+}
+
+static void amdxdna_remove(struct pci_dev *pdev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
+ struct amdxdna_client *client;
+
+ destroy_workqueue(xdna->notifier_wq);
+
+ drm_dev_unplug(&xdna->ddev);
+ amdxdna_sysfs_fini(xdna);
+
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ while (client) {
+ list_del_init(&client->node);
+ amdxdna_hwctx_remove_all(client);
+
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ }
+
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+}
+
+static const struct dev_pm_ops amdxdna_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
+ RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
+};
+
+static struct pci_driver amdxdna_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_ids,
+ .probe = amdxdna_probe,
+ .remove = amdxdna_remove,
+ .driver.pm = &amdxdna_pm_ops,
+};
+
+module_pci_driver(amdxdna_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_DESCRIPTION("amdxdna driver");
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
new file mode 100644
index 000000000000..c99477f5e454
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PCI_DRV_H_
+#define _AMDXDNA_PCI_DRV_H_
+
+#include <drm/drm_print.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+
+#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
+#define XDNA_WARN(xdna, fmt, args...) drm_warn(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_ERR(xdna, fmt, args...) drm_err(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_DBG(xdna, fmt, args...) drm_dbg(&(xdna)->ddev, fmt, ##args)
+#define XDNA_INFO_ONCE(xdna, fmt, args...) drm_info_once(&(xdna)->ddev, fmt, ##args)
+
+#define XDNA_MBZ_DBG(xdna, ptr, sz) \
+ ({ \
+ int __i; \
+ int __ret = 0; \
+ u8 *__ptr = (u8 *)(ptr); \
+ for (__i = 0; __i < (sz); __i++) { \
+ if (__ptr[__i]) { \
+ XDNA_DBG(xdna, "MBZ check failed"); \
+ __ret = -EINVAL; \
+ break; \
+ } \
+ } \
+ __ret; \
+ })
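+
+/*
+ * Typical use of the check above (sketch; the "pad" field stands in for
+ * any reserved, must-be-zero ioctl argument):
+ *
+ *	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ *		return -EINVAL;
+ */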
+
+#define to_xdna_dev(drm_dev) \
+ ((struct amdxdna_dev *)container_of(drm_dev, struct amdxdna_dev, ddev))
+
+extern const struct drm_driver amdxdna_drm_drv;
+
+struct amdxdna_client;
+struct amdxdna_dev;
+struct amdxdna_drm_get_info;
+struct amdxdna_drm_set_state;
+struct amdxdna_gem_obj;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+/*
+ * struct amdxdna_dev_ops - Device hardware operation callbacks
+ */
+struct amdxdna_dev_ops {
+ int (*init)(struct amdxdna_dev *xdna);
+ void (*fini)(struct amdxdna_dev *xdna);
+ int (*resume)(struct amdxdna_dev *xdna);
+ int (*suspend)(struct amdxdna_dev *xdna);
+ int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
+ int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ int (*hwctx_sync_debug_bo)(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
+ void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+ int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+ int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
+ int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
+};
+
+/*
+ * struct amdxdna_dev_info - Device hardware information
+ * Records static device information, like the reg, mbox, PSP and SMU BAR indexes
+ */
+struct amdxdna_dev_info {
+ int reg_bar;
+ int mbox_bar;
+ int sram_bar;
+ int psp_bar;
+ int smu_bar;
+ int device_type;
+ int first_col;
+ u32 dev_mem_buf_shift;
+ u64 dev_mem_base;
+ size_t dev_mem_size;
+ char *vbnv;
+ const struct amdxdna_dev_priv *dev_priv;
+ const struct amdxdna_dev_ops *ops;
+};
+
+struct amdxdna_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 sub;
+ u32 build;
+};
+
+struct amdxdna_dev {
+ struct drm_device ddev;
+ struct amdxdna_dev_hdl *dev_handle;
+ const struct amdxdna_dev_info *dev_info;
+ void *xrs_hdl;
+
+ struct mutex dev_lock; /* per device lock */
+ struct list_head client_list;
+ struct amdxdna_fw_ver fw_ver;
+ struct rw_semaphore notifier_lock; /* for mmu notifier */
+ struct workqueue_struct *notifier_wq;
+ bool rpm_on;
+};
+
+/*
+ * struct amdxdna_device_id - PCI device info
+ */
+struct amdxdna_device_id {
+ unsigned short device;
+ u8 revision;
+ const struct amdxdna_dev_info *dev_info;
+};
+
+/*
+ * struct amdxdna_client - amdxdna client
+ * A per-fd data structure for managing contexts and other user-process state.
+ */
+struct amdxdna_client {
+ struct list_head node;
+ pid_t pid;
+ struct srcu_struct hwctx_srcu;
+ struct xarray hwctx_xa;
+ u32 next_hwctxid;
+ struct amdxdna_dev *xdna;
+ struct drm_file *filp;
+
+ struct mutex mm_lock; /* protect memory related */
+ struct amdxdna_gem_obj *dev_heap;
+
+ struct iommu_sva *sva;
+ int pasid;
+};
+
+#define amdxdna_for_each_hwctx(client, hwctx_id, entry) \
+ xa_for_each(&(client)->hwctx_xa, hwctx_id, entry)
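+
+/*
+ * Sketch of walking a client's contexts with the wrapper above
+ * (hwctx_id and hwctx are caller-provided locals; handle_one() is a
+ * placeholder for per-context work):
+ *
+ *	unsigned long hwctx_id;
+ *	struct amdxdna_hwctx *hwctx;
+ *
+ *	amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ *		handle_one(hwctx);
+ */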
+
+/* Add device info below */
+extern const struct amdxdna_dev_info dev_npu1_info;
+extern const struct amdxdna_dev_info dev_npu2_info;
+extern const struct amdxdna_dev_info dev_npu4_info;
+extern const struct amdxdna_dev_info dev_npu5_info;
+extern const struct amdxdna_dev_info dev_npu6_info;
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna);
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PCI_DRV_H_ */
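
(The hwctx_srcu/hwctx_xa pair in struct amdxdna_client is the intended usage pattern for amdxdna_for_each_hwctx(): readers walk the xarray inside an SRCU read-side section so contexts can be torn down without blocking them. A sketch, assuming a per-context suspend helper that is not part of this header:)

	static void suspend_all_hwctx(struct amdxdna_client *client)
	{
		struct amdxdna_hwctx *hwctx;
		unsigned long hwctx_id;
		int idx;

		idx = srcu_read_lock(&client->hwctx_srcu);
		amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
			aie2_hwctx_suspend(hwctx);	/* assumed helper */
		srcu_read_unlock(&client->hwctx_srcu, idx);
	}
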
diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
new file mode 100644
index 000000000000..fa38e65d617c
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_drv.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_pm.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+int amdxdna_pm_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->suspend) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->suspend(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Suspend done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->resume) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->resume(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Resume done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+ int ret;
+
+ if (!xdna->rpm_on)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Resume failed: %d", ret);
+ pm_runtime_set_suspended(dev);
+ }
+
+ return ret;
+}
+
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ if (!xdna->rpm_on)
+ return;
+
+ pm_runtime_put_autosuspend(dev);
+}
+
+void amdxdna_pm_init(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_put_autosuspend(dev);
+ xdna->rpm_on = true;
+}
+
+void amdxdna_pm_fini(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ xdna->rpm_on = false;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+}
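
(amdxdna_pm_resume_get()/amdxdna_pm_suspend_put() are meant to bracket any path that touches the hardware, while the rpm_on flag keeps the driver's own suspend/resume callbacks from recursing into runtime PM. A sketch of the expected call pattern in an ioctl handler; the handler and hardware-access names are assumptions:)

	static int amdxdna_some_ioctl(struct amdxdna_dev *xdna, void *args)
	{
		int ret;

		ret = amdxdna_pm_resume_get(xdna);
		if (ret)
			return ret;

		ret = amdxdna_hw_do_work(xdna, args);	/* assumed hardware access */

		amdxdna_pm_suspend_put(xdna);
		return ret;
	}
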
diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
new file mode 100644
index 000000000000..77b2d6e45570
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PM_H_
+#define _AMDXDNA_PM_H_
+
+#include "amdxdna_pci_drv.h"
+
+int amdxdna_pm_suspend(struct device *dev);
+int amdxdna_pm_resume(struct device *dev);
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna);
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
+void amdxdna_pm_init(struct amdxdna_dev *xdna);
+void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PM_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_sysfs.c b/drivers/accel/amdxdna/amdxdna_sysfs.c
new file mode 100644
index 000000000000..f27e4ee960a0
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_sysfs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/types.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+static ssize_t vbnv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", xdna->dev_info->vbnv);
+}
+static DEVICE_ATTR_RO(vbnv);
+
+static ssize_t device_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", xdna->dev_info->device_type);
+}
+static DEVICE_ATTR_RO(device_type);
+
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d.%d.%d.%d\n", xdna->fw_ver.major,
+ xdna->fw_ver.minor, xdna->fw_ver.sub,
+ xdna->fw_ver.build);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static struct attribute *amdxdna_attrs[] = {
+ &dev_attr_device_type.attr,
+ &dev_attr_vbnv.attr,
+ &dev_attr_fw_version.attr,
+ NULL,
+};
+
+static struct attribute_group amdxdna_attr_group = {
+ .attrs = amdxdna_attrs,
+};
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ ret = sysfs_create_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+ if (ret)
+ XDNA_ERR(xdna, "Create attr group failed");
+
+ return ret;
+}
+
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna)
+{
+ sysfs_remove_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+}
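
(The three attributes land under the backing PCI device's sysfs directory. A userspace sketch reading the firmware version; the BDF in the path is illustrative only:)

	#include <stdio.h>

	int main(void)
	{
		char ver[64];
		/* path is illustrative; find the device under /sys/class/accel */
		FILE *f = fopen("/sys/bus/pci/devices/0000:c5:00.1/fw_version", "r");

		if (!f)
			return 1;
		if (fgets(ver, sizeof(ver), f))
			printf("NPU firmware: %s", ver);
		fclose(f);
		return 0;
	}
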
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
new file mode 100644
index 000000000000..077b2261cf2a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <linux/dma-buf.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+struct amdxdna_ubuf_priv {
+ struct page **pages;
+ u64 nr_pages;
+ enum amdxdna_ubuf_flag flags;
+ struct mm_struct *mm;
+};
+
+static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
+					ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (ret)
+		goto free_sg;
+
+	if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
+		ret = dma_map_sgtable(attach->dev, sg, direction, 0);
+		if (ret)
+			goto free_table;
+	}
+
+	return sg;
+
+free_table:
+	sg_free_table(sg);
+free_sg:
+	kfree(sg);
+	return ERR_PTR(ret);
+}
+
+static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
+ dma_unmap_sgtable(attach->dev, sg, direction, 0);
+
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void amdxdna_ubuf_release(struct dma_buf *dbuf)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ unpin_user_pages(ubuf->pages, ubuf->nr_pages);
+ kvfree(ubuf->pages);
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+}
+
+static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct amdxdna_ubuf_priv *ubuf;
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ ubuf = vma->vm_private_data;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
+ .fault = amdxdna_ubuf_vm_fault,
+};
+
+static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ vma->vm_ops = &amdxdna_ubuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+ void *kva;
+
+ kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!kva)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, kva);
+ return 0;
+}
+
+static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
+
+static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
+ .map_dma_buf = amdxdna_ubuf_map,
+ .unmap_dma_buf = amdxdna_ubuf_unmap,
+ .release = amdxdna_ubuf_release,
+ .mmap = amdxdna_ubuf_mmap,
+ .vmap = amdxdna_ubuf_vmap,
+ .vunmap = amdxdna_ubuf_vunmap,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ unsigned long lock_limit, new_pinned;
+ struct amdxdna_drm_va_entry *va_ent;
+ struct amdxdna_ubuf_priv *ubuf;
+ u32 npages, start = 0;
+ struct dma_buf *dbuf;
+ int i, ret;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return ERR_PTR(-ENOMEM);
+
+ ubuf->flags = flags;
+ ubuf->mm = current->mm;
+ mmgrab(ubuf->mm);
+
+ va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
+ if (!va_ent) {
+ ret = -ENOMEM;
+ goto free_ubuf;
+ }
+
+	if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
+		XDNA_DBG(xdna, "Access va entries failed");
+		ret = -EFAULT;
+		goto free_ent;
+	}
+
+ for (i = 0, exp_info.size = 0; i < num_entries; i++) {
+ if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
+ !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
+ XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
+ va_ent[i].vaddr, va_ent[i].len);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ exp_info.size += va_ent[i].len;
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
+ new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ npages = va_ent[i].len >> PAGE_SHIFT;
+
+ ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ &ubuf->pages[start]);
+		if (ret < 0 || ret != npages) {
+			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
+			if (ret > 0)
+				start += ret;	/* unpin the partial pin too */
+			ret = -ENOMEM;
+			goto destroy_pages;
+		}
+
+ start += ret;
+ }
+
+ exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ goto destroy_pages;
+ }
+ kvfree(va_ent);
+
+ return dbuf;
+
+destroy_pages:
+ if (start)
+ unpin_user_pages(ubuf->pages, start);
+ kvfree(ubuf->pages);
+sub_pin_cnt:
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+free_ent:
+ kvfree(va_ent);
+free_ubuf:
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+ return ERR_PTR(ret);
+}
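
(amdxdna_get_ubuf() is the backend for a BO-create path that imports arbitrary user VA ranges: the caller passes an array of page-aligned {vaddr, len} entries and gets back a dma-buf wrapping the pinned pages. A hedged sketch of a caller; the ioctl argument field names are assumptions:)

	struct dma_buf *dbuf;

	/* args->va_entries points at struct amdxdna_drm_va_entry[num] in userspace */
	dbuf = amdxdna_get_ubuf(ddev, AMDXDNA_UBUF_FLAG_MAP_DMA,
				args->num_entries,
				u64_to_user_ptr(args->va_entries));
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);
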
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h
new file mode 100644
index 000000000000..e5cb3bdb3ec9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDXDNA_UBUF_H_
+#define _AMDXDNA_UBUF_H_
+
+#include <drm/drm_device.h>
+#include <linux/dma-buf.h>
+
+enum amdxdna_ubuf_flag {
+ AMDXDNA_UBUF_FLAG_MAP_DMA = 1,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries);
+
+#endif /* _AMDXDNA_UBUF_H_ */
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
new file mode 100644
index 000000000000..ec407f3b48fc
--- /dev/null
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* Address definition from NPU1 docs */
+#define MPNPU_PUB_SEC_INTR 0x3010090
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010094
+#define MPNPU_PUB_SCRATCH2 0x30100A0
+#define MPNPU_PUB_SCRATCH3 0x30100A4
+#define MPNPU_PUB_SCRATCH4 0x30100A8
+#define MPNPU_PUB_SCRATCH5 0x30100AC
+#define MPNPU_PUB_SCRATCH6 0x30100B0
+#define MPNPU_PUB_SCRATCH7 0x30100B4
+#define MPNPU_PUB_SCRATCH9 0x30100BC
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x30A0000
+#define MPNPU_SRAM_X2I_MAILBOX_1 0x30A2000
+#define MPNPU_SRAM_I2X_MAILBOX_15 0x30BF000
+
+#define MPNPU_APERTURE0_BASE 0x3000000
+#define MPNPU_APERTURE1_BASE 0x3080000
+#define MPNPU_APERTURE2_BASE 0x30C0000
+
+/* PCIe BAR Index for NPU1 */
+#define NPU1_REG_BAR_INDEX 0
+#define NPU1_MBOX_BAR_INDEX 4
+#define NPU1_PSP_BAR_INDEX 0
+#define NPU1_SMU_BAR_INDEX 0
+#define NPU1_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU1_REG_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_MBOX_BAR_BASE MPNPU_APERTURE2_BASE
+#define NPU1_PSP_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SMU_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SRAM_BAR_BASE MPNPU_APERTURE1_BASE
+
+const struct rt_config npu1_default_rt_cfg[] = {
+ { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 4, 1, AIE2_RT_CFG_INIT }, /* Debug BO */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu1_dpm_clk_table[] = {
+ {400, 800},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {720, 1309},
+ {720, 1309},
+ {847, 1600},
+ { 0 }
+};
+
+static const struct aie2_fw_feature_tbl npu1_fw_feature_table[] = {
+ { .feature = AIE2_NPU_COMMAND, .min_minor = 8 },
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu1_dev_priv = {
+ .fw_path = "amdnpu/1502_00/npu.sbin",
+ .protocol_major = 0x5,
+ .protocol_minor = 0x7,
+ .rt_config = npu1_default_rt_cfg,
+ .dpm_clk_tbl = npu1_dpm_clk_table,
+ .fw_feature_tbl = npu1_fw_feature_table,
+ .col_align = COL_ALIGN_NONE,
+ .mbox_dev_addr = NPU1_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .hwctx_limit = 6,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU1_PSP, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU1_PSP, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU1_PSP, MPNPU_PUB_SEC_INTR),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU1_SMU, MPNPU_PUB_SCRATCH5),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU1_SMU, MPNPU_PUB_PWRMGMT_INTR),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU1_SMU, MPNPU_PUB_SCRATCH6),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ },
+ .hw_ops = {
+ .set_dpm = npu1_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu1_info = {
+ .reg_bar = NPU1_REG_BAR_INDEX,
+ .mbox_bar = NPU1_MBOX_BAR_INDEX,
+ .sram_bar = NPU1_SRAM_BAR_INDEX,
+ .psp_bar = NPU1_PSP_BAR_INDEX,
+ .smu_bar = NPU1_SMU_BAR_INDEX,
+ .first_col = 1,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu1",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu1_dev_priv,
+ .ops = &aie2_ops,
+};
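
(DEFINE_BAR_OFFSET comes from aie2_pci.h, which is not part of this hunk. The tables above pair each logical register with the absolute aperture address of its backing scratch register, so the macro presumably subtracts the BAR base to yield a BAR-relative offset that the aie2 layer can use with ioread/iowrite. A sketch of the presumed shape; this is an assumption about the macro, not its verbatim definition:)

	/* presumed shape: offset = device address - <PREFIX>_BAR_BASE */
	#define DEFINE_BAR_OFFSET(reg_name, prefix, addr) \
		[reg_name] = (addr) - prefix##_BAR_BASE

	/*
	 * So PSP_CMD_REG resolves to MPNPU_PUB_SCRATCH2 - NPU1_PSP_BAR_BASE,
	 * and a register access becomes roughly:
	 *	writel(val, psp_base + priv->psp_regs_off[PSP_CMD_REG]);
	 */
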
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
new file mode 100644
index 000000000000..86f87d0d1354
--- /dev/null
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU2 */
+#define NPU2_REG_BAR_INDEX 0
+#define NPU2_MBOX_BAR_INDEX 0
+#define NPU2_PSP_BAR_INDEX 4
+#define NPU2_SMU_BAR_INDEX 5
+#define NPU2_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU2_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU2_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU2_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu2_dev_priv = {
+ .fw_path = "amdnpu/17f0_00/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 0x6,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU2_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU2_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU2_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU2_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU2_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU2_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU2_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU2_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU2_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu2_info = {
+ .reg_bar = NPU2_REG_BAR_INDEX,
+ .mbox_bar = NPU2_MBOX_BAR_INDEX,
+ .sram_bar = NPU2_SRAM_BAR_INDEX,
+ .psp_bar = NPU2_PSP_BAR_INDEX,
+ .smu_bar = NPU2_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu2",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu2_dev_priv,
+	.ops = &aie2_ops, /* NPU2 shares NPU1's callbacks */
+};
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
new file mode 100644
index 000000000000..986a5f28ba24
--- /dev/null
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU4 */
+#define NPU4_REG_BAR_INDEX 0
+#define NPU4_MBOX_BAR_INDEX 0
+#define NPU4_PSP_BAR_INDEX 4
+#define NPU4_SMU_BAR_INDEX 5
+#define NPU4_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU4_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU4_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU4_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+const struct rt_config npu4_default_rt_cfg[] = {
+ { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 10, 1, AIE2_RT_CFG_INIT }, /* DEBUG BUF */
+ { 14, 0, AIE2_RT_CFG_INIT, BIT_U64(AIE2_PREEMPT) }, /* Frame boundary preemption */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 13, 0, AIE2_RT_CFG_FORCE_PREEMPT },
+ { 14, 0, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT },
+ { 0 },
+};
+
+const struct dpm_clk_freq npu4_dpm_clk_table[] = {
+ {396, 792},
+ {600, 1056},
+ {792, 1152},
+ {975, 1267},
+ {975, 1267},
+ {1056, 1408},
+ {1152, 1584},
+ {1267, 1800},
+ { 0 }
+};
+
+const struct aie2_fw_feature_tbl npu4_fw_feature_table[] = {
+ { .feature = AIE2_NPU_COMMAND, .min_minor = 15 },
+ { .feature = AIE2_PREEMPT, .min_minor = 12 },
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu4_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU4_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU4_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU4_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU4_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU4_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU4_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU4_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU4_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU4_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu4_info = {
+ .reg_bar = NPU4_REG_BAR_INDEX,
+ .mbox_bar = NPU4_MBOX_BAR_INDEX,
+ .sram_bar = NPU4_SRAM_BAR_INDEX,
+ .psp_bar = NPU4_PSP_BAR_INDEX,
+ .smu_bar = NPU4_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu4",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu4_dev_priv,
+	.ops = &aie2_ops, /* NPU4 shares NPU1's callbacks */
+};
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
new file mode 100644
index 000000000000..75ad97f0b937
--- /dev/null
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU5 */
+#define NPU5_REG_BAR_INDEX 0
+#define NPU5_MBOX_BAR_INDEX 0
+#define NPU5_PSP_BAR_INDEX 4
+#define NPU5_SMU_BAR_INDEX 5
+#define NPU5_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU5_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU5_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu5_dev_priv = {
+ .fw_path = "amdnpu/17f0_11/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU5_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU5_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU5_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU5_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU5_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU5_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU5_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU5_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU5_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu5_info = {
+ .reg_bar = NPU5_REG_BAR_INDEX,
+ .mbox_bar = NPU5_MBOX_BAR_INDEX,
+ .sram_bar = NPU5_SRAM_BAR_INDEX,
+ .psp_bar = NPU5_PSP_BAR_INDEX,
+ .smu_bar = NPU5_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu5",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu5_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
new file mode 100644
index 000000000000..758dc013fe13
--- /dev/null
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU6 */
+#define NPU6_REG_BAR_INDEX 0
+#define NPU6_MBOX_BAR_INDEX 0
+#define NPU6_PSP_BAR_INDEX 4
+#define NPU6_SMU_BAR_INDEX 5
+#define NPU6_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU6_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU6_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu6_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU6_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU6_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU6_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU6_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU6_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU6_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU6_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU6_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU6_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu6_info = {
+ .reg_bar = NPU6_REG_BAR_INDEX,
+ .mbox_bar = NPU6_MBOX_BAR_INDEX,
+ .sram_bar = NPU6_SRAM_BAR_INDEX,
+ .psp_bar = NPU6_PSP_BAR_INDEX,
+ .smu_bar = NPU6_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu6",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu6_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 16c3edb8c46e..ca3357acd127 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -8,7 +8,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <drm/drm_accel.h>
#include <drm/drm_auth.h>
@@ -18,10 +18,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
-static DEFINE_SPINLOCK(accel_minor_lock);
-static struct idr accel_minors_idr;
-
-static struct dentry *accel_debugfs_root;
+DEFINE_XARRAY_ALLOC(accel_minors_xa);
static const struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
@@ -75,17 +72,6 @@ static const struct drm_info_list accel_debugfs_list[] = {
#define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list)
/**
- * accel_debugfs_init() - Initialize debugfs for device
- * @dev: Pointer to the device instance.
- *
- * This function creates a root directory for the device in debugfs.
- */
-void accel_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_dev_init(dev, accel_debugfs_root);
-}
-
-/**
* accel_debugfs_register() - Register debugfs for device
* @dev: Pointer to the device instance.
*
@@ -118,99 +104,6 @@ void accel_set_device_instance_params(struct device *kdev, int index)
}
/**
- * accel_minor_alloc() - Allocates a new accel minor
- *
- * This function access the accel minors idr and allocates from it
- * a new id to represent a new accel minor
- *
- * Return: A new id on success or error code in case idr_alloc failed
- */
-int accel_minor_alloc(void)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-
- return r;
-}
-
-/**
- * accel_minor_remove() - Remove an accel minor
- * @index: The minor id to remove.
- *
- * This function access the accel minors idr and removes from
- * it the member with the id that is passed to this function.
- */
-void accel_minor_remove(int index)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- idr_remove(&accel_minors_idr, index);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/**
- * accel_minor_replace() - Replace minor pointer in accel minors idr.
- * @minor: Pointer to the new minor.
- * @index: The minor id to replace.
- *
- * This function access the accel minors idr structure and replaces the pointer
- * that is associated with an existing id. Because the minor pointer can be
- * NULL, we need to explicitly pass the index.
- *
- * Return: 0 for success, negative value for error
- */
-void accel_minor_replace(struct drm_minor *minor, int index)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- idr_replace(&accel_minors_idr, minor, index);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/*
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with accel_minor_release().
- *
- * The object can be only a drm_minor that represents an accel device.
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- */
-static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- minor = idr_find(&accel_minors_idr, minor_id);
- if (minor)
- drm_dev_get(minor->dev);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-
- if (!minor) {
- return ERR_PTR(-ENODEV);
- } else if (drm_dev_is_unplugged(minor->dev)) {
- drm_dev_put(minor->dev);
- return ERR_PTR(-ENODEV);
- }
-
- return minor;
-}
-
-static void accel_minor_release(struct drm_minor *minor)
-{
- drm_dev_put(minor->dev);
-}
-
-/**
* accel_open - open method for ACCEL file
* @inode: device inode
* @filp: file pointer.
@@ -227,7 +120,7 @@ int accel_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int retcode;
- minor = accel_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -246,7 +139,7 @@ int accel_open(struct inode *inode, struct file *filp)
err_undo:
atomic_dec(&dev->open_count);
- accel_minor_release(minor);
+ drm_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL_GPL(accel_open);
@@ -257,7 +150,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int err;
- minor = accel_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -274,7 +167,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
err = 0;
out:
- accel_minor_release(minor);
+ drm_minor_release(minor);
return err;
}
@@ -288,25 +181,20 @@ static const struct file_operations accel_stub_fops = {
void accel_core_exit(void)
{
unregister_chrdev(ACCEL_MAJOR, "accel");
- debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
- idr_destroy(&accel_minors_idr);
+ WARN_ON(!xa_empty(&accel_minors_xa));
}
int __init accel_core_init(void)
{
int ret;
- idr_init(&accel_minors_idr);
-
ret = accel_sysfs_init();
if (ret < 0) {
DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
goto error;
}
- accel_debugfs_root = debugfs_create_dir("accel", NULL);
-
ret = register_chrdev(ACCEL_MAJOR, "accel", &accel_stub_fops);
if (ret < 0)
DRM_ERROR("Cannot register ACCEL major: %d\n", ret);
diff --git a/drivers/accel/ethosu/Kconfig b/drivers/accel/ethosu/Kconfig
new file mode 100644
index 000000000000..d25f9b3eb317
--- /dev/null
+++ b/drivers/accel/ethosu/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ARM_ETHOSU
+ tristate "Arm Ethos-U65/U85 NPU"
+ depends on HAS_IOMEM
+ depends on DRM_ACCEL
+ select DRM_GEM_DMA_HELPER
+ select DRM_SCHED
+ select GENERIC_ALLOCATOR
+ help
+ Enables driver for Arm Ethos-U65/U85 NPUs
diff --git a/drivers/accel/ethosu/Makefile b/drivers/accel/ethosu/Makefile
new file mode 100644
index 000000000000..17db5a600416
--- /dev/null
+++ b/drivers/accel/ethosu/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) := ethosu.o
+ethosu-y += ethosu_drv.o ethosu_gem.o ethosu_job.o
diff --git a/drivers/accel/ethosu/ethosu_device.h b/drivers/accel/ethosu/ethosu_device.h
new file mode 100644
index 000000000000..b189fa783d6a
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_device.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_DEVICE_H__
+#define __ETHOSU_DEVICE_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+
+#include <drm/ethosu_accel.h>
+
+struct clk;
+struct gen_pool;
+
+#define NPU_REG_ID 0x0000
+#define NPU_REG_STATUS 0x0004
+#define NPU_REG_CMD 0x0008
+#define NPU_REG_RESET 0x000c
+#define NPU_REG_QBASE 0x0010
+#define NPU_REG_QBASE_HI 0x0014
+#define NPU_REG_QREAD 0x0018
+#define NPU_REG_QCONFIG 0x001c
+#define NPU_REG_QSIZE 0x0020
+#define NPU_REG_PROT 0x0024
+#define NPU_REG_CONFIG 0x0028
+#define NPU_REG_REGIONCFG 0x003c
+#define NPU_REG_AXILIMIT0 0x0040 // U65
+#define NPU_REG_AXILIMIT1 0x0044 // U65
+#define NPU_REG_AXILIMIT2 0x0048 // U65
+#define NPU_REG_AXILIMIT3 0x004c // U65
+#define NPU_REG_MEM_ATTR0 0x0040 // U85
+#define NPU_REG_MEM_ATTR1 0x0044 // U85
+#define NPU_REG_MEM_ATTR2 0x0048 // U85
+#define NPU_REG_MEM_ATTR3 0x004c // U85
+#define NPU_REG_AXI_SRAM 0x0050 // U85
+#define NPU_REG_AXI_EXT 0x0054 // U85
+
+#define NPU_REG_BASEP(x) (0x0080 + (x) * 8)
+#define NPU_REG_BASEP_HI(x) (0x0084 + (x) * 8)
+#define NPU_BASEP_REGION_MAX 8
+
+#define ID_ARCH_MAJOR_MASK GENMASK(31, 28)
+#define ID_ARCH_MINOR_MASK GENMASK(27, 20)
+#define ID_ARCH_PATCH_MASK GENMASK(19, 16)
+#define ID_VER_MAJOR_MASK GENMASK(11, 8)
+#define ID_VER_MINOR_MASK GENMASK(7, 4)
+
+#define CONFIG_MACS_PER_CC_MASK GENMASK(3, 0)
+#define CONFIG_CMD_STREAM_VER_MASK GENMASK(7, 4)
+
+#define STATUS_STATE_RUNNING BIT(0)
+#define STATUS_IRQ_RAISED BIT(1)
+#define STATUS_BUS_STATUS BIT(2)
+#define STATUS_RESET_STATUS BIT(3)
+#define STATUS_CMD_PARSE_ERR BIT(4)
+#define STATUS_CMD_END_REACHED BIT(5)
+
+#define CMD_CLEAR_IRQ BIT(1)
+#define CMD_TRANSITION_TO_RUN BIT(0)
+
+#define RESET_PENDING_CSL BIT(1)
+#define RESET_PENDING_CPL BIT(0)
+
+#define PROT_ACTIVE_CSL BIT(1)
+
+enum ethosu_cmds {
+ NPU_OP_CONV = 0x2,
+ NPU_OP_DEPTHWISE = 0x3,
+ NPU_OP_POOL = 0x5,
+ NPU_OP_ELEMENTWISE = 0x6,
+ NPU_OP_RESIZE = 0x7, // U85 only
+ NPU_OP_DMA_START = 0x10,
+ NPU_SET_IFM_PAD_TOP = 0x100,
+ NPU_SET_IFM_PAD_LEFT = 0x101,
+ NPU_SET_IFM_PAD_RIGHT = 0x102,
+ NPU_SET_IFM_PAD_BOTTOM = 0x103,
+ NPU_SET_IFM_DEPTH_M1 = 0x104,
+ NPU_SET_IFM_PRECISION = 0x105,
+ NPU_SET_IFM_BROADCAST = 0x108,
+ NPU_SET_IFM_WIDTH0_M1 = 0x10a,
+ NPU_SET_IFM_HEIGHT0_M1 = 0x10b,
+ NPU_SET_IFM_HEIGHT1_M1 = 0x10c,
+ NPU_SET_IFM_REGION = 0x10f,
+ NPU_SET_OFM_WIDTH_M1 = 0x111,
+ NPU_SET_OFM_HEIGHT_M1 = 0x112,
+ NPU_SET_OFM_DEPTH_M1 = 0x113,
+ NPU_SET_OFM_PRECISION = 0x114,
+ NPU_SET_OFM_WIDTH0_M1 = 0x11a,
+ NPU_SET_OFM_HEIGHT0_M1 = 0x11b,
+ NPU_SET_OFM_HEIGHT1_M1 = 0x11c,
+ NPU_SET_OFM_REGION = 0x11f,
+ NPU_SET_KERNEL_WIDTH_M1 = 0x120,
+ NPU_SET_KERNEL_HEIGHT_M1 = 0x121,
+ NPU_SET_KERNEL_STRIDE = 0x122,
+ NPU_SET_WEIGHT_REGION = 0x128,
+ NPU_SET_SCALE_REGION = 0x129,
+ NPU_SET_DMA0_SRC_REGION = 0x130,
+ NPU_SET_DMA0_DST_REGION = 0x131,
+ NPU_SET_DMA0_SIZE0 = 0x132,
+ NPU_SET_DMA0_SIZE1 = 0x133,
+ NPU_SET_IFM2_BROADCAST = 0x180,
+ NPU_SET_IFM2_PRECISION = 0x185,
+ NPU_SET_IFM2_WIDTH0_M1 = 0x18a,
+ NPU_SET_IFM2_HEIGHT0_M1 = 0x18b,
+ NPU_SET_IFM2_HEIGHT1_M1 = 0x18c,
+ NPU_SET_IFM2_REGION = 0x18f,
+ NPU_SET_IFM_BASE0 = 0x4000,
+ NPU_SET_IFM_BASE1 = 0x4001,
+ NPU_SET_IFM_BASE2 = 0x4002,
+ NPU_SET_IFM_BASE3 = 0x4003,
+ NPU_SET_IFM_STRIDE_X = 0x4004,
+ NPU_SET_IFM_STRIDE_Y = 0x4005,
+ NPU_SET_IFM_STRIDE_C = 0x4006,
+ NPU_SET_OFM_BASE0 = 0x4010,
+ NPU_SET_OFM_BASE1 = 0x4011,
+ NPU_SET_OFM_BASE2 = 0x4012,
+ NPU_SET_OFM_BASE3 = 0x4013,
+ NPU_SET_OFM_STRIDE_X = 0x4014,
+ NPU_SET_OFM_STRIDE_Y = 0x4015,
+ NPU_SET_OFM_STRIDE_C = 0x4016,
+ NPU_SET_WEIGHT_BASE = 0x4020,
+ NPU_SET_WEIGHT_LENGTH = 0x4021,
+ NPU_SET_SCALE_BASE = 0x4022,
+ NPU_SET_SCALE_LENGTH = 0x4023,
+ NPU_SET_DMA0_SRC = 0x4030,
+ NPU_SET_DMA0_DST = 0x4031,
+ NPU_SET_DMA0_LEN = 0x4032,
+ NPU_SET_DMA0_SRC_STRIDE0 = 0x4033,
+ NPU_SET_DMA0_SRC_STRIDE1 = 0x4034,
+ NPU_SET_DMA0_DST_STRIDE0 = 0x4035,
+ NPU_SET_DMA0_DST_STRIDE1 = 0x4036,
+ NPU_SET_IFM2_BASE0 = 0x4080,
+ NPU_SET_IFM2_BASE1 = 0x4081,
+ NPU_SET_IFM2_BASE2 = 0x4082,
+ NPU_SET_IFM2_BASE3 = 0x4083,
+ NPU_SET_IFM2_STRIDE_X = 0x4084,
+ NPU_SET_IFM2_STRIDE_Y = 0x4085,
+ NPU_SET_IFM2_STRIDE_C = 0x4086,
+ NPU_SET_WEIGHT1_BASE = 0x4090,
+ NPU_SET_WEIGHT1_LENGTH = 0x4091,
+ NPU_SET_SCALE1_BASE = 0x4092,
+ NPU_SET_WEIGHT2_BASE = 0x4092,
+ NPU_SET_SCALE1_LENGTH = 0x4093,
+ NPU_SET_WEIGHT2_LENGTH = 0x4093,
+ NPU_SET_WEIGHT3_BASE = 0x4094,
+ NPU_SET_WEIGHT3_LENGTH = 0x4095,
+};
+
+#define ETHOSU_SRAM_REGION 2 /* Matching Vela compiler */
+
+/**
+ * struct ethosu_device - Ethosu device
+ */
+struct ethosu_device {
+ /** @base: Base drm_device. */
+ struct drm_device base;
+
+ /** @iomem: CPU mapping of the registers. */
+ void __iomem *regs;
+
+ void __iomem *sram;
+ struct gen_pool *srampool;
+ dma_addr_t sramphys;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int irq;
+
+ struct drm_ethosu_npu_info npu_info;
+
+ struct ethosu_job *in_flight_job;
+ /* For in_flight_job and ethosu_job_hw_submit() */
+ struct mutex job_lock;
+
+ /* For dma_fence */
+ spinlock_t fence_lock;
+
+ struct drm_gpu_scheduler sched;
+ /* For ethosu_job_do_push() */
+ struct mutex sched_lock;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+#define to_ethosu_device(drm_dev) \
+ ((struct ethosu_device *)container_of(drm_dev, struct ethosu_device, base))
+
+static inline bool ethosu_is_u65(const struct ethosu_device *ethosudev)
+{
+ return FIELD_GET(ID_ARCH_MAJOR_MASK, ethosudev->npu_info.id) == 1;
+}
+
+#endif
diff --git a/drivers/accel/ethosu/ethosu_drv.c b/drivers/accel/ethosu/ethosu_drv.c
new file mode 100644
index 000000000000..e05a69bf5574
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_drv.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+// Copyright (C) 2025 Arm, Ltd.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_utils.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_accel.h>
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_drv.h"
+#include "ethosu_device.h"
+#include "ethosu_gem.h"
+#include "ethosu_job.h"
+
+static int ethosu_ioctl_dev_query(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct ethosu_device *ethosudev = to_ethosu_device(ddev);
+ struct drm_ethosu_dev_query *args = data;
+
+ if (!args->pointer) {
+ switch (args->type) {
+ case DRM_ETHOSU_DEV_QUERY_NPU_INFO:
+ args->size = sizeof(ethosudev->npu_info);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (args->type) {
+ case DRM_ETHOSU_DEV_QUERY_NPU_INFO:
+ if (args->size < offsetofend(struct drm_ethosu_npu_info, sram_size))
+ return -EINVAL;
+ return copy_struct_to_user(u64_to_user_ptr(args->pointer),
+ args->size,
+ &ethosudev->npu_info,
+ sizeof(ethosudev->npu_info), NULL);
+ default:
+ return -EINVAL;
+ }
+}
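
(This implements the usual two-pass query contract: a NULL pointer asks for the size, a second call fetches the data, and copy_struct_to_user() keeps older and newer struct sizes compatible in both directions. A userspace sketch; the DRM_IOCTL_ETHOSU_DEV_QUERY request macro is assumed from the uapi header, and fd is an open /dev/accel node:)

	struct drm_ethosu_dev_query q = { .type = DRM_ETHOSU_DEV_QUERY_NPU_INFO };
	struct drm_ethosu_npu_info info;

	/* pass 1: learn the kernel's struct size */
	if (ioctl(fd, DRM_IOCTL_ETHOSU_DEV_QUERY, &q))
		return -1;

	/* pass 2: fetch up to our struct's size */
	q.pointer = (uintptr_t)&info;
	if (q.size > sizeof(info))
		q.size = sizeof(info);
	if (ioctl(fd, DRM_IOCTL_ETHOSU_DEV_QUERY, &q))
		return -1;
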
+
+#define ETHOSU_BO_FLAGS DRM_ETHOSU_BO_NO_MMAP
+
+static int ethosu_ioctl_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || (args->flags & ~ETHOSU_BO_FLAGS)) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ ret = ethosu_gem_create_with_handle(file, ddev, &args->size,
+ args->flags, &args->handle);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_ioctl_bo_wait(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_wait *args = data;
+ int cookie, ret;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = drm_gem_dma_resv_wait(file, args->handle, true, timeout);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_mmap_offset *args = data;
+ struct drm_gem_object *obj;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+ drm_gem_object_put(obj);
+ return 0;
+}
+
+static int ethosu_ioctl_cmdstream_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_cmdstream_bo_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || !args->data || args->pad || args->flags) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ args->flags |= DRM_ETHOSU_BO_NO_MMAP;
+
+ ret = ethosu_gem_cmdstream_create(file, ddev, args->size, args->data,
+ args->flags, &args->handle);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_open(struct drm_device *ddev, struct drm_file *file)
+{
+ int ret = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ struct ethosu_file_priv __free(kfree) *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+ priv->edev = to_ethosu_device(ddev);
+
+ ret = ethosu_job_open(priv);
+ if (ret)
+ goto err_put_mod;
+
+ file->driver_priv = no_free_ptr(priv);
+ return 0;
+
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void ethosu_postclose(struct drm_device *ddev, struct drm_file *file)
+{
+ ethosu_job_close(file->driver_priv);
+ kfree(file->driver_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc ethosu_drm_driver_ioctls[] = {
+#define ETHOSU_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(ETHOSU_##n, ethosu_ioctl_##func, flags)
+
+ ETHOSU_IOCTL(DEV_QUERY, dev_query, 0),
+ ETHOSU_IOCTL(BO_CREATE, bo_create, 0),
+ ETHOSU_IOCTL(BO_WAIT, bo_wait, 0),
+ ETHOSU_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, 0),
+ ETHOSU_IOCTL(CMDSTREAM_BO_CREATE, cmdstream_bo_create, 0),
+ ETHOSU_IOCTL(SUBMIT, submit, 0),
+};
+
+DEFINE_DRM_ACCEL_FOPS(ethosu_drm_driver_fops);
+
+/*
+ * Ethosu driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver ethosu_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = ethosu_open,
+ .postclose = ethosu_postclose,
+ .ioctls = ethosu_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(ethosu_drm_driver_ioctls),
+ .fops = &ethosu_drm_driver_fops,
+ .name = "ethosu",
+ .desc = "Arm Ethos-U Accel driver",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = ethosu_gem_create_object,
+};
+
+#define U65_DRAM_AXI_LIMIT_CFG 0x1f3f0002
+#define U65_SRAM_AXI_LIMIT_CFG 0x1f3f00b0
+#define U85_AXI_EXT_CFG 0x00021f3f
+#define U85_AXI_SRAM_CFG 0x00021f3f
+#define U85_MEM_ATTR0_CFG 0x00000000
+#define U85_MEM_ATTR2_CFG 0x000000b7
+
+static int ethosu_reset(struct ethosu_device *ethosudev)
+{
+ int ret;
+ u32 reg;
+
+ writel_relaxed(RESET_PENDING_CSL, ethosudev->regs + NPU_REG_RESET);
+ ret = readl_poll_timeout(ethosudev->regs + NPU_REG_STATUS, reg,
+ !FIELD_GET(STATUS_RESET_STATUS, reg),
+ USEC_PER_MSEC, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(PROT_ACTIVE_CSL, readl_relaxed(ethosudev->regs + NPU_REG_PROT))) {
+ dev_warn(ethosudev->base.dev, "Could not reset to non-secure mode (PROT = %x)\n",
+ readl_relaxed(ethosudev->regs + NPU_REG_PROT));
+ }
+
+	/*
+	 * Assign region 2 (SRAM) to AXI M0 and everything else to AXI M1
+	 * (AXILIMIT0/AXILIMIT2 on U65, AXI_SRAM/AXI_EXT on U85).
+	 */
+ writel_relaxed(0x0000aa8a, ethosudev->regs + NPU_REG_REGIONCFG);
+ if (ethosu_is_u65(ethosudev)) {
+ writel_relaxed(U65_SRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT0);
+ writel_relaxed(U65_DRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT2);
+ } else {
+ writel_relaxed(U85_AXI_SRAM_CFG, ethosudev->regs + NPU_REG_AXI_SRAM);
+ writel_relaxed(U85_AXI_EXT_CFG, ethosudev->regs + NPU_REG_AXI_EXT);
+ writel_relaxed(U85_MEM_ATTR0_CFG, ethosudev->regs + NPU_REG_MEM_ATTR0); // SRAM
+ writel_relaxed(U85_MEM_ATTR2_CFG, ethosudev->regs + NPU_REG_MEM_ATTR2); // DRAM
+ }
+
+ if (ethosudev->sram)
+ memset_io(ethosudev->sram, 0, ethosudev->npu_info.sram_size);
+
+ return 0;
+}
+
+static int ethosu_device_resume(struct device *dev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ethosudev->num_clks, ethosudev->clks);
+ if (ret)
+ return ret;
+
+ ret = ethosu_reset(ethosudev);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks);
+ return ret;
+}
+
+static int ethosu_device_suspend(struct device *dev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks);
+ return 0;
+}
+
+static int ethosu_sram_init(struct ethosu_device *ethosudev)
+{
+ ethosudev->npu_info.sram_size = 0;
+
+ ethosudev->srampool = of_gen_pool_get(ethosudev->base.dev->of_node, "sram", 0);
+ if (!ethosudev->srampool)
+ return 0;
+
+ ethosudev->npu_info.sram_size = gen_pool_size(ethosudev->srampool);
+
+ ethosudev->sram = (void __iomem *)gen_pool_dma_alloc(ethosudev->srampool,
+ ethosudev->npu_info.sram_size,
+ &ethosudev->sramphys);
+ if (!ethosudev->sram) {
+ dev_err(ethosudev->base.dev, "failed to allocate from SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ethosu_init(struct ethosu_device *ethosudev)
+{
+ int ret;
+ u32 id, config;
+
+ ret = ethosu_device_resume(ethosudev->base.dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(ethosudev->base.dev, 50);
+ pm_runtime_use_autosuspend(ethosudev->base.dev);
+ ret = devm_pm_runtime_set_active_enabled(ethosudev->base.dev);
+ if (ret)
+ return ret;
+ pm_runtime_get_noresume(ethosudev->base.dev);
+
+ ethosudev->npu_info.id = id = readl_relaxed(ethosudev->regs + NPU_REG_ID);
+ ethosudev->npu_info.config = config = readl_relaxed(ethosudev->regs + NPU_REG_CONFIG);
+
+ ethosu_sram_init(ethosudev);
+
+ dev_info(ethosudev->base.dev,
+ "Ethos-U NPU, arch v%ld.%ld.%ld, rev r%ldp%ld, cmd stream ver%ld, %d MACs, %dKB SRAM\n",
+ FIELD_GET(ID_ARCH_MAJOR_MASK, id),
+ FIELD_GET(ID_ARCH_MINOR_MASK, id),
+ FIELD_GET(ID_ARCH_PATCH_MASK, id),
+ FIELD_GET(ID_VER_MAJOR_MASK, id),
+ FIELD_GET(ID_VER_MINOR_MASK, id),
+ FIELD_GET(CONFIG_CMD_STREAM_VER_MASK, config),
+ 1 << FIELD_GET(CONFIG_MACS_PER_CC_MASK, config),
+ ethosudev->npu_info.sram_size / 1024);
+
+ return 0;
+}
+
+static int ethosu_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ethosu_device *ethosudev;
+
+ ethosudev = devm_drm_dev_alloc(&pdev->dev, &ethosu_drm_driver,
+ struct ethosu_device, base);
+ if (IS_ERR(ethosudev))
+		return PTR_ERR(ethosudev);
+ platform_set_drvdata(pdev, ethosudev);
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
+
+	ethosudev->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(ethosudev->regs))
+		return PTR_ERR(ethosudev->regs);
+
+ ethosudev->num_clks = devm_clk_bulk_get_all(&pdev->dev, &ethosudev->clks);
+ if (ethosudev->num_clks < 0)
+ return ethosudev->num_clks;
+
+ ret = ethosu_job_init(ethosudev);
+ if (ret)
+ return ret;
+
+ ret = ethosu_init(ethosudev);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(&ethosudev->base, 0);
+ if (ret)
+ pm_runtime_dont_use_autosuspend(ethosudev->base.dev);
+
+ pm_runtime_put_autosuspend(ethosudev->base.dev);
+ return ret;
+}
+
+static void ethosu_remove(struct platform_device *pdev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(&pdev->dev);
+
+ drm_dev_unregister(&ethosudev->base);
+ ethosu_job_fini(ethosudev);
+ if (ethosudev->sram)
+ gen_pool_free(ethosudev->srampool, (unsigned long)ethosudev->sram,
+ ethosudev->npu_info.sram_size);
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,ethos-u65" },
+ { .compatible = "arm,ethos-u85" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ethosu_pm_ops,
+ ethosu_device_suspend,
+ ethosu_device_resume,
+ NULL);
+
+static struct platform_driver ethosu_driver = {
+ .probe = ethosu_probe,
+ .remove = ethosu_remove,
+ .driver = {
+ .name = "ethosu",
+ .pm = pm_ptr(&ethosu_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(ethosu_driver);
+
+MODULE_AUTHOR("Rob Herring <robh@kernel.org>");
+MODULE_DESCRIPTION("Arm Ethos-U Accel Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/accel/ethosu/ethosu_drv.h b/drivers/accel/ethosu/ethosu_drv.h
new file mode 100644
index 000000000000..9e21dfe94184
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_drv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+#ifndef __ETHOSU_DRV_H__
+#define __ETHOSU_DRV_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct ethosu_device;
+
+struct ethosu_file_priv {
+ struct ethosu_device *edev;
+ struct drm_sched_entity sched_entity;
+};
+
+#endif
diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c
new file mode 100644
index 000000000000..473b5f5d7514
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_gem.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2025 Arm, Ltd. */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_device.h"
+#include "ethosu_gem.h"
+
+static void ethosu_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ethosu_gem_object *bo = to_ethosu_bo(obj);
+
+ kfree(bo->info);
+ drm_gem_free_mmap_offset(&bo->base.base);
+ drm_gem_dma_free(&bo->base);
+}
+
+static int ethosu_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct ethosu_gem_object *bo = to_ethosu_bo(obj);
+
+ /* Don't allow mmap on objects that have the NO_MMAP flag set. */
+ if (bo->flags & DRM_ETHOSU_BO_NO_MMAP)
+ return -EINVAL;
+
+ return drm_gem_dma_object_mmap(obj, vma);
+}
+
+static const struct drm_gem_object_funcs ethosu_gem_funcs = {
+ .free = ethosu_gem_free_object,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = ethosu_gem_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
+};
+
+/**
+ * ethosu_gem_create_object - Implementation of driver->gem_create_object.
+ * @ddev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate the object struct for us while
+ * wiring up our driver-specific GEM object functions.
+ */
+struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, size_t size)
+{
+ struct ethosu_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &ethosu_gem_funcs;
+ return &obj->base.base;
+}
+
+/**
+ * ethosu_gem_create_with_handle() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @size: Requested size; updated with the actual page-aligned BO size.
+ * @flags: Combination of drm_ethosu_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: Zero on success, or a negative error code on failure.
+ */
+int ethosu_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ u64 *size, u32 flags, u32 *handle)
+{
+ struct drm_gem_dma_object *mem;
+ struct ethosu_gem_object *bo;
+ int ret;
+
+ mem = drm_gem_dma_create(ddev, *size);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ bo = to_ethosu_bo(&mem->base);
+ bo->flags = flags;
+
+ /*
+ * Allocate an ID in the IDR table where the object is registered;
+ * the returned handle is what userspace sees.
+ */
+ ret = drm_gem_handle_create(file, &mem->base, handle);
+ if (!ret)
+ *size = bo->base.base.size;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&mem->base);
+
+ return ret;
+}
+
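+/*
+ * The structs below shadow the subset of NPU command-stream state that
+ * the validator must track: SET_* commands update this state, and each
+ * operation then computes the worst-case extent it touches in every
+ * base-pointer region.
+ */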
+struct dma {
+ s8 region;
+ u64 len;
+ u64 offset;
+ s64 stride[2];
+};
+
+struct dma_state {
+ u16 size0;
+ u16 size1;
+ s8 mode;
+ struct dma src;
+ struct dma dst;
+};
+
+struct buffer {
+ u64 base;
+ u32 length;
+ s8 region;
+};
+
+struct feat_matrix {
+ u64 base[4];
+ s64 stride_x;
+ s64 stride_y;
+ s64 stride_c;
+ s8 region;
+ u8 broadcast;
+ u16 stride_kernel;
+ u16 precision;
+ u16 depth;
+ u16 width;
+ u16 width0;
+ u16 height[3];
+ u8 pad_top;
+ u8 pad_left;
+ u8 pad_bottom;
+ u8 pad_right;
+};
+
+struct cmd_state {
+ struct dma_state dma;
+ struct buffer scale[2];
+ struct buffer weight[4];
+ struct feat_matrix ofm;
+ struct feat_matrix ifm;
+ struct feat_matrix ifm2;
+};
+
+static void cmd_state_init(struct cmd_state *st)
+{
+ /* All bytes 0xff: unset fields read as -1/U16_MAX/U64_MAX sentinels */
+ memset(st, 0xff, sizeof(*st));
+}
+
+static u64 cmd_to_addr(u32 *cmd)
+{
+ /* Address bits [39:32] live in cmd[0][23:16]; cast before shifting */
+ return ((u64)(cmd[0] & 0xff0000) << 16) | cmd[1];
+}
+
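+/*
+ * Worst-case DMA extent from the region offset. For example (hypothetical
+ * values): mode 2, len 64, stride[0] 16, size0 4, stride[1] 256 and
+ * size1 2 give ((64 + 16) * 4 + 256) * 2 = 1152 bytes.
+ */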
+static u64 dma_length(struct ethosu_validated_cmdstream_info *info,
+ struct dma_state *dma_st, struct dma *dma)
+{
+ s8 mode = dma_st->mode;
+ u64 len = dma->len;
+
+ if (mode >= 1) {
+ len += dma->stride[0];
+ len *= dma_st->size0;
+ }
+ if (mode == 2) {
+ len += dma->stride[1];
+ len *= dma_st->size1;
+ }
+ if (dma->region >= 0)
+ info->region_size[dma->region] = max(info->region_size[dma->region],
+ len + dma->offset);
+
+ return len;
+}
+
+static u64 feat_matrix_length(struct ethosu_validated_cmdstream_info *info,
+ struct feat_matrix *fm,
+ u32 x, u32 y, u32 c)
+{
+ u32 element_size, storage = fm->precision >> 14;
+ int tile = 0;
+ u64 addr;
+
+ if (fm->region < 0)
+ return U64_MAX;
+
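+ /*
+ * Work out which feature-map tile the (x, y) element lands in; the
+ * tile arrangement depends on the storage mode encoded in the
+ * precision register.
+ */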
+ switch (storage) {
+ case 0:
+ if (x >= fm->width0 + 1) {
+ x -= fm->width0 + 1;
+ tile += 1;
+ }
+ if (y >= fm->height[tile] + 1) {
+ y -= fm->height[tile] + 1;
+ tile += 2;
+ }
+ break;
+ case 1:
+ if (y >= fm->height[1] + 1) {
+ y -= fm->height[1] + 1;
+ tile = 2;
+ } else if (y >= fm->height[0] + 1) {
+ y -= fm->height[0] + 1;
+ tile = 1;
+ }
+ break;
+ }
+ if (fm->base[tile] == U64_MAX)
+ return U64_MAX;
+
+ addr = fm->base[tile] + y * fm->stride_y;
+
+ switch ((fm->precision >> 6) & 0x3) { // format
+ case 0: //nhwc:
+ addr += x * fm->stride_x + c;
+ break;
+ case 1: //nhcwb16:
+ element_size = BIT((fm->precision >> 1) & 0x3);
+
+ addr += (c / 16) * fm->stride_c + (16 * x + (c & 0xf)) * element_size;
+ break;
+ }
+
+ info->region_size[fm->region] = max(info->region_size[fm->region], addr + 1);
+
+ return addr;
+}
+
+static int calc_sizes(struct drm_device *ddev,
+ struct ethosu_validated_cmdstream_info *info,
+ u16 op, struct cmd_state *st,
+ bool ifm, bool ifm2, bool weight, bool scale)
+{
+ u64 len;
+
+ if (ifm) {
+ if (st->ifm.stride_kernel == U16_MAX)
+ return -EINVAL;
+ u32 stride_y = ((st->ifm.stride_kernel >> 8) & 0x2) +
+ ((st->ifm.stride_kernel >> 1) & 0x1) + 1;
+ u32 stride_x = ((st->ifm.stride_kernel >> 5) & 0x2) +
+ (st->ifm.stride_kernel & 0x1) + 1;
+ u32 ifm_height = st->ofm.height[2] * stride_y +
+ st->ifm.height[2] - (st->ifm.pad_top + st->ifm.pad_bottom);
+ u32 ifm_width = st->ofm.width * stride_x +
+ st->ifm.width - (st->ifm.pad_left + st->ifm.pad_right);
+
+ len = feat_matrix_length(info, &st->ifm, ifm_width,
+ ifm_height, st->ifm.depth);
+ dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
+ op, st->ifm.region, st->ifm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (ifm2) {
+ len = feat_matrix_length(info, &st->ifm2, st->ifm.depth,
+ 0, st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n",
+ op, st->ifm2.region, st->ifm2.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (weight) {
+ dev_dbg(ddev->dev, "op %d: W:%d:0x%llx-0x%llx\n",
+ op, st->weight[0].region, st->weight[0].base,
+ st->weight[0].base + st->weight[0].length - 1);
+ if (st->weight[0].region < 0 || st->weight[0].base == U64_MAX ||
+ st->weight[0].length == U32_MAX)
+ return -EINVAL;
+ info->region_size[st->weight[0].region] =
+ max(info->region_size[st->weight[0].region],
+ st->weight[0].base + st->weight[0].length);
+ }
+
+ if (scale) {
+ dev_dbg(ddev->dev, "op %d: S:%d:0x%llx-0x%llx\n",
+ op, st->scale[0].region, st->scale[0].base,
+ st->scale[0].base + st->scale[0].length - 1);
+ if (st->scale[0].region < 0 || st->scale[0].base == U64_MAX ||
+ st->scale[0].length == U32_MAX)
+ return -EINVAL;
+ info->region_size[st->scale[0].region] =
+ max(info->region_size[st->scale[0].region],
+ st->scale[0].base + st->scale[0].length);
+ }
+
+ len = feat_matrix_length(info, &st->ofm, st->ofm.width,
+ st->ofm.height[2], st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n",
+ op, st->ofm.region, st->ofm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ info->output_region[st->ofm.region] = true;
+
+ return 0;
+}
+
+static int calc_sizes_elemwise(struct drm_device *ddev,
+ struct ethosu_validated_cmdstream_info *info,
+ u16 op, struct cmd_state *st,
+ bool ifm, bool ifm2)
+{
+ u32 height, width, depth;
+ u64 len;
+
+ if (ifm) {
+ height = st->ifm.broadcast & 0x1 ? 0 : st->ofm.height[2];
+ width = st->ifm.broadcast & 0x2 ? 0 : st->ofm.width;
+ depth = st->ifm.broadcast & 0x4 ? 0 : st->ofm.depth;
+
+ len = feat_matrix_length(info, &st->ifm, width,
+ height, depth);
+ dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
+ op, st->ifm.region, st->ifm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (ifm2) {
+ height = st->ifm2.broadcast & 0x1 ? 0 : st->ofm.height[2];
+ width = st->ifm2.broadcast & 0x2 ? 0 : st->ofm.width;
+ depth = st->ifm2.broadcast & 0x4 ? 0 : st->ofm.depth;
+
+ len = feat_matrix_length(info, &st->ifm2, width,
+ height, depth);
+ dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n",
+ op, st->ifm2.region, st->ifm2.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ len = feat_matrix_length(info, &st->ofm, st->ofm.width,
+ st->ofm.height[2], st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n",
+ op, st->ofm.region, st->ofm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ info->output_region[st->ofm.region] = true;
+
+ return 0;
+}
+
+static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev,
+ u32 __user *ucmds,
+ struct ethosu_gem_object *bo,
+ u32 size)
+{
+ struct ethosu_validated_cmdstream_info __free(kfree) *info =
+ kzalloc(sizeof(*info), GFP_KERNEL);
+ struct ethosu_device *edev = to_ethosu_device(ddev);
+ u32 *bocmds = bo->base.vaddr;
+ struct cmd_state st;
+ int i, ret;
+
+ if (!info)
+ return -ENOMEM;
+ info->cmd_size = size;
+
+ cmd_state_init(&st);
+
+ for (i = 0; i < size / 4; i++) {
+ bool use_ifm, use_ifm2, use_scale;
+ u64 dstlen, srclen;
+ u16 cmd, param;
+ u32 cmds[2];
+ u64 addr;
+
+ if (get_user(cmds[0], ucmds++))
+ return -EFAULT;
+
+ bocmds[i] = cmds[0];
+
+ cmd = cmds[0];
+ param = cmds[0] >> 16;
+
+ if (cmd & 0x4000) {
+ /* Two-word command; make sure the payload word is in bounds */
+ if (i + 1 >= size / 4)
+ return -EINVAL;
+ if (get_user(cmds[1], ucmds++))
+ return -EFAULT;
+
+ i++;
+ bocmds[i] = cmds[1];
+ addr = cmd_to_addr(cmds);
+ }
+
+ switch (cmd) {
+ case NPU_OP_DMA_START:
+ srclen = dma_length(info, &st.dma, &st.dma.src);
+ dstlen = dma_length(info, &st.dma, &st.dma.dst);
+
+ if (st.dma.dst.region >= 0)
+ info->output_region[st.dma.dst.region] = true;
+ dev_dbg(ddev->dev, "cmd: DMA SRC:%d:0x%llx+0x%llx DST:%d:0x%llx+0x%llx\n",
+ st.dma.src.region, st.dma.src.offset, srclen,
+ st.dma.dst.region, st.dma.dst.offset, dstlen);
+ break;
+ case NPU_OP_CONV:
+ case NPU_OP_DEPTHWISE:
+ use_ifm2 = param & 0x1; // weights_ifm2
+ use_scale = !(st.ofm.precision & 0x100);
+ ret = calc_sizes(ddev, info, cmd, &st, true, use_ifm2,
+ !use_ifm2, use_scale);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_POOL:
+ use_ifm = param != 0x4; // pooling mode
+ use_scale = !(st.ofm.precision & 0x100);
+ ret = calc_sizes(ddev, info, cmd, &st, use_ifm, false,
+ false, use_scale);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_ELEMENTWISE:
+ use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) ||
+ (param == 6) || (param == 7) || (param == 0x24));
+ use_ifm = st.ifm.broadcast != 8;
+ ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_RESIZE: // U85 only
+ /* TODO: not validated yet; reject rather than run unchecked */
+ return -EOPNOTSUPP;
+ case NPU_SET_KERNEL_WIDTH_M1:
+ st.ifm.width = param;
+ break;
+ case NPU_SET_KERNEL_HEIGHT_M1:
+ st.ifm.height[2] = param;
+ break;
+ case NPU_SET_KERNEL_STRIDE:
+ st.ifm.stride_kernel = param;
+ break;
+ case NPU_SET_IFM_PAD_TOP:
+ st.ifm.pad_top = param & 0x7f;
+ break;
+ case NPU_SET_IFM_PAD_LEFT:
+ st.ifm.pad_left = param & 0x7f;
+ break;
+ case NPU_SET_IFM_PAD_RIGHT:
+ st.ifm.pad_right = param & 0xff;
+ break;
+ case NPU_SET_IFM_PAD_BOTTOM:
+ st.ifm.pad_bottom = param & 0xff;
+ break;
+ case NPU_SET_IFM_DEPTH_M1:
+ st.ifm.depth = param;
+ break;
+ case NPU_SET_IFM_PRECISION:
+ st.ifm.precision = param;
+ break;
+ case NPU_SET_IFM_BROADCAST:
+ st.ifm.broadcast = param;
+ break;
+ case NPU_SET_IFM_REGION:
+ st.ifm.region = param & 0x7; /* 3-bit index, as for the other region regs */
+ break;
+ case NPU_SET_IFM_WIDTH0_M1:
+ st.ifm.width0 = param;
+ break;
+ case NPU_SET_IFM_HEIGHT0_M1:
+ st.ifm.height[0] = param;
+ break;
+ case NPU_SET_IFM_HEIGHT1_M1:
+ st.ifm.height[1] = param;
+ break;
+ case NPU_SET_IFM_BASE0:
+ case NPU_SET_IFM_BASE1:
+ case NPU_SET_IFM_BASE2:
+ case NPU_SET_IFM_BASE3:
+ st.ifm.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_X:
+ st.ifm.stride_x = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_Y:
+ st.ifm.stride_y = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_C:
+ st.ifm.stride_c = addr;
+ break;
+
+ case NPU_SET_OFM_WIDTH_M1:
+ st.ofm.width = param;
+ break;
+ case NPU_SET_OFM_HEIGHT_M1:
+ st.ofm.height[2] = param;
+ break;
+ case NPU_SET_OFM_DEPTH_M1:
+ st.ofm.depth = param;
+ break;
+ case NPU_SET_OFM_PRECISION:
+ st.ofm.precision = param;
+ break;
+ case NPU_SET_OFM_REGION:
+ st.ofm.region = param & 0x7;
+ break;
+ case NPU_SET_OFM_WIDTH0_M1:
+ st.ofm.width0 = param;
+ break;
+ case NPU_SET_OFM_HEIGHT0_M1:
+ st.ofm.height[0] = param;
+ break;
+ case NPU_SET_OFM_HEIGHT1_M1:
+ st.ofm.height[1] = param;
+ break;
+ case NPU_SET_OFM_BASE0:
+ case NPU_SET_OFM_BASE1:
+ case NPU_SET_OFM_BASE2:
+ case NPU_SET_OFM_BASE3:
+ st.ofm.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_X:
+ st.ofm.stride_x = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_Y:
+ st.ofm.stride_y = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_C:
+ st.ofm.stride_c = addr;
+ break;
+
+ case NPU_SET_IFM2_BROADCAST:
+ st.ifm2.broadcast = param;
+ break;
+ case NPU_SET_IFM2_PRECISION:
+ st.ifm2.precision = param;
+ break;
+ case NPU_SET_IFM2_REGION:
+ st.ifm2.region = param & 0x7;
+ break;
+ case NPU_SET_IFM2_WIDTH0_M1:
+ st.ifm2.width0 = param;
+ break;
+ case NPU_SET_IFM2_HEIGHT0_M1:
+ st.ifm2.height[0] = param;
+ break;
+ case NPU_SET_IFM2_HEIGHT1_M1:
+ st.ifm2.height[1] = param;
+ break;
+ case NPU_SET_IFM2_BASE0:
+ case NPU_SET_IFM2_BASE1:
+ case NPU_SET_IFM2_BASE2:
+ case NPU_SET_IFM2_BASE3:
+ st.ifm2.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_X:
+ st.ifm2.stride_x = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_Y:
+ st.ifm2.stride_y = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_C:
+ st.ifm2.stride_c = addr;
+ break;
+
+ case NPU_SET_WEIGHT_REGION:
+ st.weight[0].region = param & 0x7;
+ break;
+ case NPU_SET_SCALE_REGION:
+ st.scale[0].region = param & 0x7;
+ break;
+ case NPU_SET_WEIGHT_BASE:
+ st.weight[0].base = addr;
+ break;
+ case NPU_SET_WEIGHT_LENGTH:
+ st.weight[0].length = cmds[1];
+ break;
+ case NPU_SET_SCALE_BASE:
+ st.scale[0].base = addr;
+ break;
+ case NPU_SET_SCALE_LENGTH:
+ st.scale[0].length = cmds[1];
+ break;
+ case NPU_SET_WEIGHT1_BASE:
+ st.weight[1].base = addr;
+ break;
+ case NPU_SET_WEIGHT1_LENGTH:
+ st.weight[1].length = cmds[1];
+ break;
+ case NPU_SET_SCALE1_BASE: // NPU_SET_WEIGHT2_BASE (U85)
+ if (ethosu_is_u65(edev))
+ st.scale[1].base = addr;
+ else
+ st.weight[2].base = addr;
+ break;
+ case NPU_SET_SCALE1_LENGTH: // NPU_SET_WEIGHT2_LENGTH (U85)
+ if (ethosu_is_u65(edev))
+ st.scale[1].length = cmds[1];
+ else
+ st.weight[2].length = cmds[1];
+ break;
+ case NPU_SET_WEIGHT3_BASE:
+ st.weight[3].base = addr;
+ break;
+ case NPU_SET_WEIGHT3_LENGTH:
+ st.weight[3].length = cmds[1];
+ break;
+
+ case NPU_SET_DMA0_SRC_REGION:
+ if (param & 0x100)
+ st.dma.src.region = -1;
+ else
+ st.dma.src.region = param & 0x7;
+ st.dma.mode = (param >> 9) & 0x3;
+ break;
+ case NPU_SET_DMA0_DST_REGION:
+ if (param & 0x100)
+ st.dma.dst.region = -1;
+ else
+ st.dma.dst.region = param & 0x7;
+ break;
+ case NPU_SET_DMA0_SIZE0:
+ st.dma.size0 = param;
+ break;
+ case NPU_SET_DMA0_SIZE1:
+ st.dma.size1 = param;
+ break;
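+ /* Strides are 40-bit signed values; shift up and back down to sign-extend */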
+ case NPU_SET_DMA0_SRC_STRIDE0:
+ st.dma.src.stride[0] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_SRC_STRIDE1:
+ st.dma.src.stride[1] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_DST_STRIDE0:
+ st.dma.dst.stride[0] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_DST_STRIDE1:
+ st.dma.dst.stride[1] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_SRC:
+ st.dma.src.offset = addr;
+ break;
+ case NPU_SET_DMA0_DST:
+ st.dma.dst.offset = addr;
+ break;
+ case NPU_SET_DMA0_LEN:
+ st.dma.src.len = st.dma.dst.len = addr;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (i = 0; i < NPU_BASEP_REGION_MAX; i++) {
+ if (!info->region_size[i])
+ continue;
+ dev_dbg(ddev->dev, "region %d max size: 0x%llx\n",
+ i, info->region_size[i]);
+ }
+
+ bo->info = no_free_ptr(info);
+ return 0;
+}
+
+/**
+ * ethosu_gem_cmdstream_create() - Create a cmdstream GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @size: Size of the GEM object to allocate.
+ * @data: User pointer to the command stream to copy in and validate.
+ * @flags: Combination of drm_ethosu_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: Zero on success, or a negative error code on failure.
+ */
+int ethosu_gem_cmdstream_create(struct drm_file *file,
+ struct drm_device *ddev,
+ u32 size, u64 data, u32 flags, u32 *handle)
+{
+ int ret;
+ struct drm_gem_dma_object *mem;
+ struct ethosu_gem_object *bo;
+
+ mem = drm_gem_dma_create(ddev, size);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ bo = to_ethosu_bo(&mem->base);
+ bo->flags = flags;
+
+ ret = ethosu_gem_cmdstream_copy_and_validate(ddev,
+ (void __user *)(uintptr_t)data,
+ bo, size);
+ if (ret)
+ goto fail;
+
+ /*
+ * Allocate an ID in the IDR table where the object is registered;
+ * the returned handle is what userspace sees.
+ */
+ ret = drm_gem_handle_create(file, &mem->base, handle);
+
+fail:
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&mem->base);
+
+ return ret;
+}
diff --git a/drivers/accel/ethosu/ethosu_gem.h b/drivers/accel/ethosu/ethosu_gem.h
new file mode 100644
index 000000000000..3922895a60fb
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_gem.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_GEM_H__
+#define __ETHOSU_GEM_H__
+
+#include "ethosu_device.h"
+#include <drm/drm_gem_dma_helper.h>
+
+struct ethosu_validated_cmdstream_info {
+ u32 cmd_size;
+ u64 region_size[NPU_BASEP_REGION_MAX];
+ bool output_region[NPU_BASEP_REGION_MAX];
+};
+
+/**
+ * struct ethosu_gem_object - Driver specific GEM object.
+ */
+struct ethosu_gem_object {
+ /** @base: Inherit from drm_gem_dma_object. */
+ struct drm_gem_dma_object base;
+
+ /** @info: Validated cmdstream bounds; NULL unless this BO is a command stream. */
+ struct ethosu_validated_cmdstream_info *info;
+
+ /** @flags: Combination of drm_ethosu_bo_flags flags. */
+ u32 flags;
+};
+
+static inline
+struct ethosu_gem_object *to_ethosu_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_dma_obj(obj), struct ethosu_gem_object, base);
+}
+
+struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev,
+ size_t size);
+
+int ethosu_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ u64 *size, u32 flags, u32 *handle);
+
+int ethosu_gem_cmdstream_create(struct drm_file *file,
+ struct drm_device *ddev,
+ u32 size, u64 data, u32 flags, u32 *handle);
+
+#endif /* __ETHOSU_GEM_H__ */
diff --git a/drivers/accel/ethosu/ethosu_job.c b/drivers/accel/ethosu/ethosu_job.c
new file mode 100644
index 000000000000..26e7a2f64d71
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_job.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+/* Copyright 2025 Arm, Ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_device.h"
+#include "ethosu_drv.h"
+#include "ethosu_gem.h"
+#include "ethosu_job.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct ethosu_job *to_ethosu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct ethosu_job, base);
+}
+
+static const char *ethosu_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "ethosu";
+}
+
+static const char *ethosu_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "ethosu-npu";
+}
+
+static const struct dma_fence_ops ethosu_fence_ops = {
+ .get_driver_name = ethosu_fence_get_driver_name,
+ .get_timeline_name = ethosu_fence_get_timeline_name,
+};
+
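+/*
+ * Program the per-region base pointers (and the SRAM region, when used),
+ * point the queue registers at the validated command stream, then kick
+ * the NPU into the running state.
+ */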
+static void ethosu_job_hw_submit(struct ethosu_device *dev, struct ethosu_job *job)
+{
+ struct drm_gem_dma_object *cmd_bo = to_drm_gem_dma_obj(job->cmd_bo);
+ struct ethosu_validated_cmdstream_info *cmd_info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (int i = 0; i < job->region_cnt; i++) {
+ struct drm_gem_dma_object *bo;
+ int region = job->region_bo_num[i];
+
+ bo = to_drm_gem_dma_obj(job->region_bo[i]);
+ writel_relaxed(lower_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP(region));
+ writel_relaxed(upper_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP_HI(region));
+ dev_dbg(dev->base.dev, "Region %d base addr = %pad\n", region, &bo->dma_addr);
+ }
+
+ if (job->sram_size) {
+ writel_relaxed(lower_32_bits(dev->sramphys),
+ dev->regs + NPU_REG_BASEP(ETHOSU_SRAM_REGION));
+ writel_relaxed(upper_32_bits(dev->sramphys),
+ dev->regs + NPU_REG_BASEP_HI(ETHOSU_SRAM_REGION));
+ dev_dbg(dev->base.dev, "Region %d base addr = %pad (SRAM)\n",
+ ETHOSU_SRAM_REGION, &dev->sramphys);
+ }
+
+ writel_relaxed(lower_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE);
+ writel_relaxed(upper_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE_HI);
+ writel_relaxed(cmd_info->cmd_size, dev->regs + NPU_REG_QSIZE);
+
+ writel(CMD_TRANSITION_TO_RUN, dev->regs + NPU_REG_CMD);
+
+ dev_dbg(dev->base.dev,
+ "Submitted cmd at %pad to core\n", &cmd_bo->dma_addr);
+}
+
+static int ethosu_acquire_object_fences(struct ethosu_job *job)
+{
+ int i, ret;
+ struct drm_gem_object **bos = job->region_bo;
+ struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (i = 0; i < job->region_cnt; i++) {
+ bool is_write;
+
+ if (!bos[i])
+ break;
+
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ is_write = info->output_region[job->region_bo_num[i]];
+ ret = drm_sched_job_add_implicit_dependencies(&job->base, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
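+/*
+ * Publish the job's finished fence on every BO the command stream writes,
+ * so other users of those BOs implicitly wait for the inference to complete.
+ */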
+static void ethosu_attach_object_fences(struct ethosu_job *job)
+{
+ int i;
+ struct dma_fence *fence = job->inference_done_fence;
+ struct drm_gem_object **bos = job->region_bo;
+ struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (i = 0; i < job->region_cnt; i++)
+ if (info->output_region[job->region_bo_num[i]])
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
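+/*
+ * Reservations stay locked across fence acquisition, job arming and fence
+ * attachment so the dependency window is race-free; an extra job reference
+ * is taken for the scheduler and dropped on completion.
+ */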
+static int ethosu_job_push(struct ethosu_job *job)
+{
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ ret = drm_gem_lock_reservations(job->region_bo, job->region_cnt, &acquire_ctx);
+ if (ret)
+ return ret;
+
+ ret = ethosu_acquire_object_fences(job);
+ if (ret)
+ goto out;
+
+ ret = pm_runtime_resume_and_get(job->dev->base.dev);
+ if (!ret) {
+ guard(mutex)(&job->dev->sched_lock);
+
+ drm_sched_job_arm(&job->base);
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+ kref_get(&job->refcount); /* put by scheduler job completion */
+ drm_sched_entity_push_job(&job->base);
+ ethosu_attach_object_fences(job);
+ }
+
+out:
+ drm_gem_unlock_reservations(job->region_bo, job->region_cnt, &acquire_ctx);
+ return ret;
+}
+
+static void ethosu_job_cleanup(struct kref *ref)
+{
+ struct ethosu_job *job = container_of(ref, struct ethosu_job,
+ refcount);
+ unsigned int i;
+
+ pm_runtime_put_autosuspend(job->dev->base.dev);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ for (i = 0; i < job->region_cnt; i++)
+ drm_gem_object_put(job->region_bo[i]);
+
+ drm_gem_object_put(job->cmd_bo);
+
+ kfree(job);
+}
+
+static void ethosu_job_put(struct ethosu_job *job)
+{
+ kref_put(&job->refcount, ethosu_job_cleanup);
+}
+
+static void ethosu_job_free(struct drm_sched_job *sched_job)
+{
+ struct ethosu_job *job = to_ethosu_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+ ethosu_job_put(job);
+}
+
+static struct dma_fence *ethosu_job_run(struct drm_sched_job *sched_job)
+{
+ struct ethosu_job *job = to_ethosu_job(sched_job);
+ struct ethosu_device *dev = job->dev;
+ struct dma_fence *fence = job->done_fence;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ dma_fence_init(fence, &ethosu_fence_ops, &dev->fence_lock,
+ dev->fence_context, ++dev->emit_seqno);
+ dma_fence_get(fence);
+
+ scoped_guard(mutex, &dev->job_lock) {
+ dev->in_flight_job = job;
+ ethosu_job_hw_submit(dev, job);
+ }
+
+ return fence;
+}
+
+static void ethosu_job_handle_irq(struct ethosu_device *dev)
+{
+ u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS);
+
+ if (status & (STATUS_BUS_STATUS | STATUS_CMD_PARSE_ERR)) {
+ dev_err(dev->base.dev, "Error IRQ - %x\n", status);
+ drm_sched_fault(&dev->sched);
+ return;
+ }
+
+ scoped_guard(mutex, &dev->job_lock) {
+ if (dev->in_flight_job) {
+ dma_fence_signal(dev->in_flight_job->done_fence);
+ dev->in_flight_job = NULL;
+ }
+ }
+}
+
+static irqreturn_t ethosu_job_irq_handler_thread(int irq, void *data)
+{
+ struct ethosu_device *dev = data;
+
+ ethosu_job_handle_irq(dev);
+
+ return IRQ_HANDLED;
+}
+
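+/* Hard IRQ: ack the NPU interrupt and defer completion handling to the thread */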
+static irqreturn_t ethosu_job_irq_handler(int irq, void *data)
+{
+ struct ethosu_device *dev = data;
+ u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS);
+
+ if (!(status & STATUS_IRQ_RAISED))
+ return IRQ_NONE;
+
+ writel_relaxed(CMD_CLEAR_IRQ, dev->regs + NPU_REG_CMD);
+ return IRQ_WAKE_THREAD;
+}
+
+static enum drm_gpu_sched_stat ethosu_job_timedout(struct drm_sched_job *bad)
+{
+ struct ethosu_job *job = to_ethosu_job(bad);
+ struct ethosu_device *dev = job->dev;
+ bool running;
+ u32 *bocmds = to_drm_gem_dma_obj(job->cmd_bo)->vaddr;
+ u32 cmdaddr;
+
+ cmdaddr = readl_relaxed(dev->regs + NPU_REG_QREAD);
+ running = FIELD_GET(STATUS_STATE_RUNNING, readl_relaxed(dev->regs + NPU_REG_STATUS));
+
+ if (running) {
+ int ret;
+ u32 reg;
+
+ ret = readl_relaxed_poll_timeout(dev->regs + NPU_REG_QREAD,
+ reg,
+ reg != cmdaddr,
+ USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+
+ /* If still running and progress is being made, just return */
+ if (!ret)
+ return DRM_GPU_SCHED_STAT_NO_HANG;
+ }
+
+ dev_err(dev->base.dev, "NPU sched timed out: NPU %s, cmdstream offset 0x%x: 0x%x\n",
+ running ? "running" : "stopped",
+ cmdaddr, bocmds[cmdaddr / 4]);
+
+ drm_sched_stop(&dev->sched, bad);
+
+ scoped_guard(mutex, &dev->job_lock)
+ dev->in_flight_job = NULL;
+
+ /* Proceed with reset now. */
+ pm_runtime_force_suspend(dev->base.dev);
+ pm_runtime_force_resume(dev->base.dev);
+
+ /* Restart the scheduler */
+ drm_sched_start(&dev->sched, 0);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static const struct drm_sched_backend_ops ethosu_sched_ops = {
+ .run_job = ethosu_job_run,
+ .timedout_job = ethosu_job_timedout,
+ .free_job = ethosu_job_free
+};
+
+int ethosu_job_init(struct ethosu_device *edev)
+{
+ struct device *dev = edev->base.dev;
+ struct drm_sched_init_args args = {
+ .ops = &ethosu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(dev),
+ .dev = dev,
+ };
+ int ret;
+
+ spin_lock_init(&edev->fence_lock);
+ ret = devm_mutex_init(dev, &edev->job_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(dev, &edev->sched_lock);
+ if (ret)
+ return ret;
+
+ edev->irq = platform_get_irq(to_platform_device(dev), 0);
+ if (edev->irq < 0)
+ return edev->irq;
+
+ ret = devm_request_threaded_irq(dev, edev->irq,
+ ethosu_job_irq_handler,
+ ethosu_job_irq_handler_thread,
+ IRQF_SHARED, KBUILD_MODNAME,
+ edev);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ edev->fence_context = dma_fence_context_alloc(1);
+
+ ret = drm_sched_init(&edev->sched, &args);
+ if (ret) {
+ dev_err(dev, "Failed to create scheduler: %d\n", ret);
+ /* drm_sched_init() cleans up after itself on failure */
+ return ret;
+ }
+
+ return 0;
+}
+
+void ethosu_job_fini(struct ethosu_device *dev)
+{
+ drm_sched_fini(&dev->sched);
+}
+
+int ethosu_job_open(struct ethosu_file_priv *ethosu_priv)
+{
+ struct ethosu_device *dev = ethosu_priv->edev;
+ struct drm_gpu_scheduler *sched = &dev->sched;
+ int ret;
+
+ ret = drm_sched_entity_init(&ethosu_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ WARN_ON(ret);
+
+ return ret;
+}
+
+void ethosu_job_close(struct ethosu_file_priv *ethosu_priv)
+{
+ struct drm_sched_entity *entity = &ethosu_priv->sched_entity;
+
+ drm_sched_entity_destroy(entity);
+}
+
+static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_ethosu_job *job)
+{
+ struct ethosu_device *edev = to_ethosu_device(dev);
+ struct ethosu_file_priv *file_priv = file->driver_priv;
+ struct ethosu_job *ejob = NULL;
+ struct ethosu_validated_cmdstream_info *cmd_info;
+ int ret = 0;
+
+ /* The SRAM region must not also get a BO handle when SRAM is in use */
+ if (job->region_bo_handles[ETHOSU_SRAM_REGION] && job->sram_size)
+ return -EINVAL;
+
+ if (edev->npu_info.sram_size < job->sram_size)
+ return -EINVAL;
+
+ ejob = kzalloc(sizeof(*ejob), GFP_KERNEL);
+ if (!ejob)
+ return -ENOMEM;
+
+ kref_init(&ejob->refcount);
+
+ ejob->dev = edev;
+ ejob->sram_size = job->sram_size;
+
+ ejob->done_fence = kzalloc(sizeof(*ejob->done_fence), GFP_KERNEL);
+ if (!ejob->done_fence) {
+ ret = -ENOMEM;
+ /* the sched job is not initialized yet; skip its cleanup */
+ goto out_put_job;
+ }
+
+ ret = drm_sched_job_init(&ejob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ejob->cmd_bo = drm_gem_object_lookup(file, job->cmd_bo);
+ if (!ejob->cmd_bo) {
+ ret = -ENOENT;
+ goto out_cleanup_job;
+ }
+ cmd_info = to_ethosu_bo(ejob->cmd_bo)->info;
+ if (!cmd_info) {
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ for (int i = 0; i < NPU_BASEP_REGION_MAX; i++) {
+ struct drm_gem_object *gem;
+
+ /* Can only omit a BO handle if the region is not used or used for SRAM */
+ if (!job->region_bo_handles[i] &&
+ (!cmd_info->region_size[i] || (i == ETHOSU_SRAM_REGION && job->sram_size)))
+ continue;
+
+ if (job->region_bo_handles[i] && !cmd_info->region_size[i]) {
+ dev_err(dev->dev,
+ "Cmdstream BO handle %d set for unused region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ gem = drm_gem_object_lookup(file, job->region_bo_handles[i]);
+ if (!gem) {
+ dev_err(dev->dev,
+ "Invalid BO handle %d for region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -ENOENT;
+ goto out_cleanup_job;
+ }
+
+ ejob->region_bo[ejob->region_cnt] = gem;
+ ejob->region_bo_num[ejob->region_cnt] = i;
+ ejob->region_cnt++;
+
+ if (to_ethosu_bo(gem)->info) {
+ dev_err(dev->dev,
+ "Cmdstream BO handle %d used for region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ /* Verify the command stream doesn't have accesses outside the BO */
+ if (cmd_info->region_size[i] > gem->size) {
+ dev_err(dev->dev,
+ "cmd stream region %d size greater than BO size (%llu > %zu)\n",
+ i, cmd_info->region_size[i], gem->size);
+ ret = -EOVERFLOW;
+ goto out_cleanup_job;
+ }
+ }
+ ret = ethosu_job_push(ejob);
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&ejob->base);
+out_put_job:
+ ethosu_job_put(ejob);
+
+ return ret;
+}
+
+int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ethosu_submit *args = data;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->pad) {
+ drm_dbg(dev, "Reserved field in drm_ethosu_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ struct drm_ethosu_job __free(kvfree) *jobs =
+ kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs)
+ return -ENOMEM;
+
+ if (copy_from_user(jobs,
+ (void __user *)(uintptr_t)args->jobs,
+ args->job_count * sizeof(*jobs))) {
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = ethosu_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/ethosu/ethosu_job.h b/drivers/accel/ethosu/ethosu_job.h
new file mode 100644
index 000000000000..ff1cf448d094
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_job.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_JOB_H__
+#define __ETHOSU_JOB_H__
+
+#include <linux/kref.h>
+#include <drm/gpu_scheduler.h>
+
+struct ethosu_device;
+struct ethosu_file_priv;
+
+struct ethosu_job {
+ struct drm_sched_job base;
+ struct ethosu_device *dev;
+
+ struct drm_gem_object *cmd_bo;
+ struct drm_gem_object *region_bo[NPU_BASEP_REGION_MAX];
+ u8 region_bo_num[NPU_BASEP_REGION_MAX];
+ u8 region_cnt;
+ u32 sram_size;
+
+ /* Fence to be signaled by drm_sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct kref refcount;
+};
+
+int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int ethosu_job_init(struct ethosu_device *dev);
+void ethosu_job_fini(struct ethosu_device *dev);
+int ethosu_job_open(struct ethosu_file_priv *ethosu_priv);
+void ethosu_job_close(struct ethosu_file_priv *ethosu_priv);
+
+#endif
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index be85336107f9..6d1506acbd72 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -6,7 +6,7 @@
config DRM_ACCEL_HABANALABS
tristate "HabanaLabs AI accelerators"
depends on DRM_ACCEL
- depends on X86_64
+ depends on X86 && X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
@@ -27,3 +27,26 @@ config DRM_ACCEL_HABANALABS
To compile this driver as a module, choose M here: the
module will be called habanalabs.
+
+if DRM_ACCEL_HABANALABS
+
+config HL_HLDIO
+ bool "Habanalabs NVMe Direct I/O (HLDIO)"
+ depends on PCI_P2PDMA
+ depends on BLOCK
+ help
+ Enable NVMe peer-to-peer direct I/O support for Habanalabs AI
+ accelerators.
+
+ This allows direct data transfers between NVMe storage devices
+ and Habanalabs accelerators without involving system memory,
+ using PCI peer-to-peer DMA capabilities.
+
+ Requirements:
+ - CONFIG_PCI_P2PDMA=y
+ - NVMe device and Habanalabs accelerator under same PCI root complex
+ - IOMMU disabled or in passthrough mode
+ - Hardware supporting PCI P2P DMA
+
+ If unsure, say N.
+
+endif # DRM_ACCEL_HABANALABS
diff --git a/drivers/accel/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..b6d00de09db5 100644
--- a/drivers/accel/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
@@ -13,3 +13,8 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_submission.o common/firmware_if.o \
common/security.o common/state_dump.o \
common/memory_mgr.o common/decoder.o
+
+# Conditionally add HLDIO support
+ifdef CONFIG_HL_HLDIO
+HL_COMMON_FILES += common/hldio.o
+endif
\ No newline at end of file
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index 39e23d625a3c..dee487724918 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -2586,7 +2586,7 @@ int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
cs_seq = args->in.seq;
timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
- ? msecs_to_jiffies(args->in.timeout * 1000)
+ ? secs_to_jiffies(args->in.timeout)
: hpriv->hdev->timeout_jiffies;
switch (cs_type) {
@@ -3284,12 +3284,6 @@ static int ts_get_and_handle_kernel_record(struct hl_device *hdev, struct hl_ctx
/* In case the node already registered, need to unregister first then re-use */
if (req_offset_record->ts_reg_info.in_use) {
- dev_dbg(data->buf->mmg->dev,
- "Requested record %p is in use on irq: %u ts addr: %p, unregister first then put on irq: %u\n",
- req_offset_record,
- req_offset_record->ts_reg_info.interrupt->interrupt_id,
- req_offset_record->ts_reg_info.timestamp_kernel_addr,
- data->interrupt->interrupt_id);
/*
* Since interrupt here can be different than the one the node currently registered
* on, and we don't want to lock two lists while we're doing unregister, so
@@ -3345,10 +3339,6 @@ static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx
goto put_cq_cb;
}
- dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, handle: 0x%llx, ts offset: %llu, cq_offset: %llu\n",
- data->interrupt->interrupt_id, data->ts_handle,
- data->ts_offset, data->cq_offset);
-
data->buf = hl_mmap_mem_buf_get(data->mmg, data->ts_handle);
if (!data->buf) {
rc = -EINVAL;
@@ -3370,9 +3360,6 @@ static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx
if (*pend->cq_kernel_addr >= data->target_value) {
spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
- dev_dbg(hdev->dev, "Target value already reached release ts record: pend: %p, offset: %llu, interrupt: %u\n",
- pend, data->ts_offset, data->interrupt->interrupt_id);
-
pend->ts_reg_info.in_use = 0;
*status = HL_WAIT_CS_STATUS_COMPLETED;
*pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index b83141f58319..9f212b17611a 100644
--- a/drivers/accel/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
@@ -199,7 +199,6 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- char task_comm[TASK_COMM_LEN];
int rc = 0, i;
ctx->hdev = hdev;
@@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
mutex_init(&ctx->ts_reg_lock);
dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
- get_task_comm(task_comm, current), ctx->asid);
+ current->comm, ctx->asid);
}
return 0;
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index b1c88d1837d9..5f0820b19ccb 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -6,6 +6,7 @@
*/
#include "habanalabs.h"
+#include "hldio.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
@@ -42,9 +43,8 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
pkt.i2c_len = i2c_len;
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, val);
- if (rc)
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, val);
+ if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
return rc;
@@ -75,10 +75,8 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_len = i2c_len;
pkt.value = cpu_to_le64(val);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, NULL);
-
- if (rc)
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
return rc;
@@ -99,10 +97,8 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
pkt.led_index = cpu_to_le32(led);
pkt.value = cpu_to_le64(state);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, NULL);
-
- if (rc)
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}
@@ -607,6 +603,198 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+#ifdef CONFIG_HL_HLDIO
+/* DIO debugfs functions following the standard pattern */
+static int dio_ssd2hl_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\n");
+ return 0;
+ }
+
+ seq_puts(s, "Usage: echo \"fd=N va=0xADDR off=N len=N\" > dio_ssd2hl\n");
+ seq_printf(s, "Last transfer: %zu bytes\n", dev_entry->dio_stats.last_len_read);
+ seq_puts(s, "Note: All parameters must be page-aligned (4KB)\n");
+
+ return 0;
+}
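+/*
+ * Parse and kick off an SSD-to-device transfer. For example (hypothetical
+ * values), a page-aligned 1 MiB read from the start of the file backing
+ * fd 5 into device VA 0x1000000:
+ *
+ * echo "fd=5 va=0x1000000 off=0 len=1048576" > dio_ssd2hl
+ */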
+
+static ssize_t dio_ssd2hl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->kernel_ctx;
+ char kbuf[128];
+ u64 device_va = 0, off_bytes = 0, len_bytes = 0;
+ u32 fd = 0;
+ size_t len_read = 0;
+ int rc, parsed;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ /* Parse: fd=N va=0xADDR off=N len=N */
+ parsed = sscanf(kbuf, "fd=%u va=0x%llx off=%llu len=%llu",
+ &fd, &device_va, &off_bytes, &len_bytes);
+ if (parsed != 4) {
+ dev_err(hdev->dev, "Invalid format. Expected: fd=N va=0xADDR off=N len=N\n");
+ return -EINVAL;
+ }
+
+ /* Validate file descriptor */
+ if (fd == 0) {
+ dev_err(hdev->dev, "Invalid file descriptor: %u\n", fd);
+ return -EINVAL;
+ }
+
+ /* Validate alignment requirements */
+ if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
+ !IS_ALIGNED(off_bytes, PAGE_SIZE) ||
+ !IS_ALIGNED(len_bytes, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+ "All parameters must be page-aligned (4KB)\n");
+ return -EINVAL;
+ }
+
+ /* Validate transfer size */
+ if (len_bytes == 0 || len_bytes > SZ_1G) {
+ dev_err(hdev->dev, "Invalid length: %llu (max 1GB)\n",
+ len_bytes);
+ return -EINVAL;
+ }
+
+ dev_dbg(hdev->dev, "DIO SSD2HL: fd=%u va=0x%llx off=%llu len=%llu\n",
+ fd, device_va, off_bytes, len_bytes);
+
+ rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, off_bytes, len_bytes, &len_read);
+ if (rc < 0) {
+ dev_entry->dio_stats.failed_ops++;
+ dev_err(hdev->dev, "SSD2HL operation failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update statistics */
+ dev_entry->dio_stats.total_ops++;
+ dev_entry->dio_stats.successful_ops++;
+ dev_entry->dio_stats.bytes_transferred += len_read;
+ dev_entry->dio_stats.last_len_read = len_read;
+
+ dev_dbg(hdev->dev, "DIO SSD2HL completed: %zu bytes transferred\n", len_read);
+
+ return count;
+}
+
+static int dio_hl2ssd_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "HL2SSD (device-to-SSD) transfers not implemented\n");
+ return 0;
+}
+
+static ssize_t dio_hl2ssd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ dev_dbg(hdev->dev, "HL2SSD operation not implemented\n");
+ return -EOPNOTSUPP;
+}
+
+static int dio_stats_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_dio_stats *stats = &dev_entry->dio_stats;
+ u64 avg_bytes_per_op = 0, success_rate = 0;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\n");
+ return 0;
+ }
+
+ if (stats->successful_ops > 0)
+ avg_bytes_per_op = stats->bytes_transferred / stats->successful_ops;
+
+ if (stats->total_ops > 0)
+ success_rate = (stats->successful_ops * 100) / stats->total_ops;
+
+ seq_puts(s, "=== Habanalabs Direct I/O Statistics ===\n");
+ seq_printf(s, "Total operations: %llu\n", stats->total_ops);
+ seq_printf(s, "Successful ops: %llu\n", stats->successful_ops);
+ seq_printf(s, "Failed ops: %llu\n", stats->failed_ops);
+ seq_printf(s, "Success rate: %llu%%\n", success_rate);
+ seq_printf(s, "Total bytes: %llu\n", stats->bytes_transferred);
+ seq_printf(s, "Avg bytes per op: %llu\n", avg_bytes_per_op);
+ seq_printf(s, "Last transfer: %zu bytes\n", stats->last_len_read);
+
+ return 0;
+}
+
+static int dio_reset_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "Write '1' to reset DIO statistics\n");
+ return 0;
+}
+
+static ssize_t dio_reset_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[8];
+ unsigned long val;
+ int rc;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ rc = kstrtoul(kbuf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val == 1) {
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+ dev_dbg(hdev->dev, "DIO statistics reset\n");
+ } else {
+ dev_err(hdev->dev, "Write '1' to reset statistics\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif
+
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -793,6 +981,113 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
}
+static void dump_cfg_access_entry(struct hl_device *hdev,
+ struct hl_debugfs_cfg_access_entry *entry)
+{
+ const char *access_type = "";
+ struct tm tm;
+
+ switch (entry->debugfs_type) {
+ case DEBUGFS_READ32:
+ access_type = "READ32 from";
+ break;
+ case DEBUGFS_WRITE32:
+ access_type = "WRITE32 to";
+ break;
+ case DEBUGFS_READ64:
+ access_type = "READ64 from";
+ break;
+ case DEBUGFS_WRITE64:
+ access_type = "WRITE64 to";
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid DEBUGFS access type (%u)\n", entry->debugfs_type);
+ return;
+ }
+
+ time64_to_tm(entry->seconds_since_epoch, 0, &tm);
+ dev_info(hdev->dev,
+ "%ld-%02d-%02d %02d:%02d:%02d (UTC): %s %#llx\n", tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, access_type, entry->addr);
+}
+
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+ struct hl_debugfs_cfg_access *dbgfs = &hdev->debugfs_cfg_accesses;
+ u32 i, head, count = 0;
+ time64_t entry_time, now;
+ unsigned long flags;
+
+ now = ktime_get_real_seconds();
+
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ head = dbgfs->head;
+ if (head == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i = head - 1;
+
+ /* Walk back until timeout or invalid entry */
+ while (dbgfs->cfg_access_list[i].valid) {
+ entry_time = dbgfs->cfg_access_list[i].seconds_since_epoch;
+ /* Stop when entry is older than timeout */
+ if (now - entry_time > HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC)
+ break;
+
+ /* print single entry under lock */
+ {
+ struct hl_debugfs_cfg_access_entry entry = dbgfs->cfg_access_list[i];
+ /*
+ * We copy the entry out under lock and then print after
+ * releasing the lock to minimize time under lock.
+ */
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+ dump_cfg_access_entry(hdev, &entry);
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ }
+
+ /* mark consumed */
+ dbgfs->cfg_access_list[i].valid = false;
+
+ if (i == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i--;
+ count++;
+ if (count >= HL_DBGFS_CFG_ACCESS_HIST_LEN)
+ break;
+ }
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+}
+
+static void check_if_cfg_access_and_log(struct hl_device *hdev, u64 addr, size_t access_size,
+ enum debugfs_access_type access_type)
+{
+ struct hl_debugfs_cfg_access *dbgfs_cfg_accesses = &hdev->debugfs_cfg_accesses;
+ struct pci_mem_region *mem_reg = &hdev->pci_mem_region[PCI_REGION_CFG];
+ struct hl_debugfs_cfg_access_entry *new_entry;
+ unsigned long flags;
+
+ /* Check if address is in config memory */
+ if (addr >= mem_reg->region_base &&
+ mem_reg->region_size >= access_size &&
+ addr <= mem_reg->region_base + mem_reg->region_size - access_size) {
+
+ spin_lock_irqsave(&dbgfs_cfg_accesses->lock, flags);
+
+ new_entry = &dbgfs_cfg_accesses->cfg_access_list[dbgfs_cfg_accesses->head];
+ new_entry->seconds_since_epoch = ktime_get_real_seconds();
+ new_entry->addr = addr;
+ new_entry->debugfs_type = access_type;
+ new_entry->valid = true;
+ dbgfs_cfg_accesses->head = (dbgfs_cfg_accesses->head + 1)
+ % HL_DBGFS_CFG_ACCESS_HIST_LEN;
+
+ spin_unlock_irqrestore(&dbgfs_cfg_accesses->lock, flags);
+
+ }
+}
+
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
@@ -810,6 +1105,7 @@ static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
return rc;
}
+ check_if_cfg_access_and_log(hdev, addr, acc_size, acc_type);
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
@@ -1408,7 +1704,7 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return rc;
if (value)
- hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
+ hdev->timeout_jiffies = secs_to_jiffies(value);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
@@ -1530,6 +1826,13 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
+#ifdef CONFIG_HL_HLDIO
+ /* DIO entries - only created if NVMe is supported */
+ {"dio_ssd2hl", dio_ssd2hl_show, dio_ssd2hl_write},
+ {"dio_stats", dio_stats_show, NULL},
+ {"dio_reset", dio_reset_show, dio_reset_write},
+ {"dio_hl2ssd", dio_hl2ssd_show, dio_hl2ssd_write},
+#endif
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1722,7 +2025,17 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
root,
&hdev->device_release_watchdog_timeout_sec);
+ debugfs_create_u16("server_type",
+ 0444,
+ root,
+ &hdev->asic_prop.server_type);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ /* Skip DIO entries if NVMe is not supported */
+ if (strncmp(hl_debugfs_list[i].name, "dio_", 4) == 0 &&
+ !hdev->asic_prop.supports_nvme)
+ continue;
+
debugfs_create_file(hl_debugfs_list[i].name,
0644,
root,
@@ -1762,6 +2075,14 @@ int hl_debugfs_device_init(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
+ spin_lock_init(&hdev->debugfs_cfg_accesses.lock);
+ hdev->debugfs_cfg_accesses.head = 0; /* already zero by alloc but explicit init is fine */
+
+#ifdef CONFIG_HL_HLDIO
+ /* Initialize DIO statistics */
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+#endif
+
return 0;
}
@@ -1780,6 +2101,7 @@ void hl_debugfs_device_fini(struct hl_device *hdev)
vfree(entry->state_dump[i]);
kfree(entry->entry_arr);
+
}
void hl_debugfs_add_device(struct hl_device *hdev)
@@ -1792,6 +2114,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
+
}
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
@@ -1924,3 +2247,4 @@ void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
up_write(&dev_entry->state_dump_sem);
}
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 8f92445c5a90..999c92d7036e 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -30,6 +30,8 @@ enum dma_alloc_type {
#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
+static void hl_device_heartbeat(struct work_struct *work);
+
/*
* hl_set_dram_bar- sets the bar to allow later access to address
*
@@ -130,8 +132,8 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
}
if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
- trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
- caller);
+ trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,
+ size, caller);
return ptr;
}
@@ -152,7 +154,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
break;
}
- trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
+ trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);
}
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
@@ -204,15 +206,15 @@ int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
return 0;
for_each_sgtable_dma_sg(sgt, sg, i)
- trace_habanalabs_dma_map_page(hdev->dev,
- page_to_phys(sg_page(sg)),
- sg->dma_address - prop->device_dma_offset_for_host_access,
+ trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,
+ page_to_phys(sg_page(sg)),
+ sg->dma_address - prop->device_dma_offset_for_host_access,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length,
+ sg->dma_length,
#else
- sg->length,
+ sg->length,
#endif
- dir, caller);
+ dir, caller);
return 0;
}
@@ -247,7 +249,8 @@ void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
if (trace_habanalabs_dma_unmap_page_enabled()) {
for_each_sgtable_dma_sg(sgt, sg, i)
- trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
+ trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,
+ page_to_phys(sg_page(sg)),
sg->dma_address - prop->device_dma_offset_for_host_access,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length,
@@ -439,16 +442,19 @@ static void print_idle_status_mask(struct hl_device *hdev, const char *message,
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
if (idle_mask[3])
- dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
- message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
+ dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",
+ dev_name(&hdev->pdev->dev), message,
+ idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
else if (idle_mask[2])
- dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
- message, idle_mask[2], idle_mask[1], idle_mask[0]);
+ dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",
+ dev_name(&hdev->pdev->dev), message,
+ idle_mask[2], idle_mask[1], idle_mask[0]);
else if (idle_mask[1])
- dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
- message, idle_mask[1], idle_mask[0]);
+ dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",
+ dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);
else
- dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
+ dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,
+ idle_mask[0]);
}
static void hpriv_release(struct kref *ref)
@@ -545,7 +551,8 @@ int hl_hpriv_put(struct hl_fpriv *hpriv)
return kref_put(&hpriv->refcount, hpriv_release);
}
-static void print_device_in_use_info(struct hl_device *hdev, const char *message)
+static void print_device_in_use_info(struct hl_device *hdev,
+ struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)
{
u32 active_cs_num, dmabuf_export_cnt;
bool unknown_reason = true;
@@ -569,6 +576,12 @@ static void print_device_in_use_info(struct hl_device *hdev, const char *message
dmabuf_export_cnt);
}
+ if (mm_fini_stats->n_busy_cb) {
+ unknown_reason = false;
+ offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",
+ mm_fini_stats->n_busy_cb);
+ }
+
if (unknown_reason)
scnprintf(buf + offset, size - offset, " [unknown reason]");
@@ -586,6 +599,7 @@ void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
{
struct hl_fpriv *hpriv = file_priv->driver_priv;
struct hl_device *hdev = to_hl_device(ddev);
+ struct hl_mem_mgr_fini_stats mm_fini_stats;
if (!hdev) {
pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
@@ -597,12 +611,13 @@ void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
/* Memory buffers might be still in use at this point and thus the handles IDR destruction
* is postponed to hpriv_release().
*/
- hl_mem_mgr_fini(&hpriv->mem_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);
hdev->compute_ctx_in_release = 1;
if (!hl_hpriv_put(hpriv)) {
- print_device_in_use_info(hdev, "User process closed FD but device still in use");
+ print_device_in_use_info(hdev, &mm_fini_stats,
+ "User process closed FD but device still in use");
hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
@@ -802,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
- msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+ secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
}
}
@@ -858,6 +873,10 @@ static int device_early_init(struct hl_device *hdev)
gaudi2_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
break;
+ case ASIC_GAUDI2D:
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -946,6 +965,8 @@ static int device_early_init(struct hl_device *hdev)
goto free_cb_mgr;
}
+ INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+
INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
hdev->device_reset_work.hdev = hdev;
hdev->device_fini_pending = 0;
@@ -968,7 +989,7 @@ static int device_early_init(struct hl_device *hdev)
return 0;
free_cb_mgr:
- hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+ hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
@@ -1012,7 +1033,7 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->clk_throttling.lock);
- hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+ hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
@@ -1045,21 +1066,36 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
return (device_id == hdev->pdev->device);
}
-static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
+static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
{
+ struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
+ u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (!prop->cpucp_info.eq_health_check_supported)
- return 0;
+ return true;
- if (hdev->eq_heartbeat_received) {
- hdev->eq_heartbeat_received = false;
- } else {
+ if (!hdev->eq_heartbeat_received) {
dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
- return -EIO;
+
+ dev_err(hdev->dev,
+ "EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",
+ hdev->event_queue.ci,
+ heartbeat_debug_info->heartbeat_event_counter,
+ &hdev->heartbeat_debug_info.last_eq_heartbeat_ts,
+ hdev->kernel_queues[cpu_q_id].pi,
+ atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
+ atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
+ &hdev->heartbeat_debug_info.last_pq_heartbeat_ts);
+
+ hl_eq_dump(hdev, &hdev->event_queue);
+
+ return false;
}
- return 0;
+ hdev->eq_heartbeat_received = false;
+
+ return true;
}
static void hl_device_heartbeat(struct work_struct *work)
@@ -1078,7 +1114,7 @@ static void hl_device_heartbeat(struct work_struct *work)
* in order to validate the eq is working.
* Only if both the EQ is healthy and we managed to send the next heartbeat reschedule.
*/
- if ((!hl_device_eq_heartbeat_check(hdev)) && (!hdev->asic_funcs->send_heartbeat(hdev)))
+ if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))
goto reschedule;
if (hl_device_operational(hdev, NULL))
@@ -1132,21 +1168,6 @@ static int device_late_init(struct hl_device *hdev)
}
hdev->high_pll = hdev->asic_prop.high_pll;
-
- if (hdev->heartbeat) {
- /*
- * Before scheduling the heartbeat driver will check if eq event has received.
- * for the first schedule we need to set the indication as true then for the next
- * one this indication will be true only if eq event was sent by FW.
- */
- hdev->eq_heartbeat_received = true;
-
- INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
-
- schedule_delayed_work(&hdev->work_heartbeat,
- usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
- }
-
hdev->late_init_done = true;
return 0;
@@ -1163,9 +1184,6 @@ static void device_late_fini(struct hl_device *hdev)
if (!hdev->late_init_done)
return;
- if (hdev->heartbeat)
- cancel_delayed_work_sync(&hdev->work_heartbeat);
-
if (hdev->asic_funcs->late_fini)
hdev->asic_funcs->late_fini(hdev);
@@ -1266,8 +1284,12 @@ static void hl_abort_waiting_for_completions(struct hl_device *hdev)
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
bool skip_wq_flush)
{
- if (hard_reset)
+ if (hard_reset) {
+ if (hdev->heartbeat)
+ cancel_delayed_work_sync(&hdev->work_heartbeat);
+
device_late_fini(hdev);
+ }
/*
* Halt the engines and disable interrupts so we won't get any more
@@ -1495,15 +1517,14 @@ static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
* of heartbeat, the device CPU is marked as disable
* so this message won't be sent
*/
- if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
- dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
+ if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
return;
- }
- /* verify that last EQs are handled before disabled is set */
+ /* disable_irq() also synchronizes the IRQ, which verifies that the last EQ
+ * entries are handled before 'disabled' is set. The IRQ will be enabled again
+ * by the request_irq() call.
+ */
if (hdev->cpu_queues_enable)
- synchronize_irq(pci_irq_vector(hdev->pdev,
- hdev->asic_prop.eq_interrupt_id));
+ disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));
}
}
@@ -1547,6 +1568,31 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
}
}
+static void reset_heartbeat_debug_info(struct hl_device *hdev)
+{
+ hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;
+ hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;
+ hdev->heartbeat_debug_info.heartbeat_event_counter = 0;
+}
+
+static inline void device_heartbeat_schedule(struct hl_device *hdev)
+{
+ if (!hdev->heartbeat)
+ return;
+
+ reset_heartbeat_debug_info(hdev);
+
+ /*
+ * Before scheduling the heartbeat, the driver checks whether an EQ heartbeat
+ * event has been received. For the first schedule we set the indication to
+ * true; for the following ones it is true only if the FW actually sent the event.
+ */
+ hdev->eq_heartbeat_received = true;
+
+ schedule_delayed_work(&hdev->work_heartbeat,
+ usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+}
+
/*
* hl_device_reset - reset the device
*
@@ -1584,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+ if (hdev->cpld_shutdown) {
+ dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");
+ return -EIO;
+ }
+
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
@@ -1916,6 +1967,8 @@ kill_processes:
if (hard_reset) {
hdev->reset_info.hard_reset_cnt++;
+ device_heartbeat_schedule(hdev);
+
/* After reset is done, we are ready to receive events from
* the F/W. We can't do it before because we will ignore events
* and if those events are fatal, we won't know about it and
@@ -2024,7 +2077,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
- msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
+ secs_to_jiffies(hdev->device_release_watchdog_timeout_sec));
hdev->reset_info.watchdog_active = 1;
out:
spin_unlock(&hdev->reset_info.lock);
@@ -2350,6 +2403,12 @@ int hl_device_init(struct hl_device *hdev)
goto out_disabled;
}
+ /* Scheduling the EQ heartbeat thread must come after driver is done with all
+ * initializations, as we want to make sure the FW gets enough time to be prepared
+ * to respond to heartbeat packets.
+ */
+ device_heartbeat_schedule(hdev);
+
dev_notice(hdev->dev,
"Successfully added device %s to habanalabs driver\n",
dev_name(&(hdev)->pdev->dev));
@@ -2522,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
+ /* Reset the H/W (if it is accessible). It will be in an idle state after this returns */
+ if (!hdev->cpld_shutdown) {
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev,
+ "hw_fini failed in device fini while removing device %d\n", rc);
+ }
+
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
@@ -2592,7 +2659,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
u32 val = readl(hdev->rmmio + reg);
if (unlikely(trace_habanalabs_rreg32_enabled()))
- trace_habanalabs_rreg32(hdev->dev, reg, val);
+ trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val);
return val;
}
@@ -2610,7 +2677,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
if (unlikely(trace_habanalabs_wreg32_enabled()))
- trace_habanalabs_wreg32(hdev->dev, reg, val);
+ trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val);
writel(val, hdev->rmmio + reg);
}
@@ -2836,3 +2903,66 @@ void hl_set_irq_affinity(struct hl_device *hdev, int irq)
if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))
dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);
}
+
+void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
+{
+ hdev->heartbeat_debug_info.heartbeat_event_counter++;
+ hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();
+ hdev->eq_heartbeat_received = true;
+}
+
+void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask)
+{
+ struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling;
+ ktime_t zero_time = ktime_set(0, 0);
+
+ mutex_lock(&clk_throttle->lock);
+
+ switch (event_type) {
+ case EQ_EVENT_POWER_EVT_START:
+ clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER;
+ clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER;
+ clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+ clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
+ dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
+ break;
+
+ case EQ_EVENT_POWER_EVT_END:
+ clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER;
+ clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
+ dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
+ break;
+
+ case EQ_EVENT_THERMAL_EVT_START:
+ clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL;
+ clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+ clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+ clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
+ break;
+
+ case EQ_EVENT_THERMAL_EVT_END:
+ clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&clk_throttle->lock);
+}
+
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_handle_critical_hw_err(hdev, event_id, event_mask);
+ *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+
+ /* Avoid any new accesses to the H/W */
+ hdev->disabled = true;
+ hdev->cpld_shutdown = true;
+}
diff --git a/drivers/accel/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index 4bd02778a970..eeb6b2a80fc7 100644
--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -8,6 +8,7 @@
#include "habanalabs.h"
#include <linux/habanalabs/hl_boot_if.h>
+#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/crc32.h>
#include <linux/slab.h>
@@ -40,6 +41,31 @@ static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
};
+/**
+ * hl_fw_version_cmp() - compares the FW version to a specific version
+ *
+ * @hdev: pointer to hl_device structure
+ * @major: major number of a reference version
+ * @minor: minor number of a reference version
+ * @subminor: sub-minor number of a reference version
+ *
+ * Return: 1 if the FW version is greater than the reference version, -1 if it
+ * is smaller and 0 if the versions are identical.
+ */
+int hl_fw_version_cmp(struct hl_device *hdev, u32 major, u32 minor, u32 subminor)
+{
+ if (hdev->fw_sw_major_ver != major)
+ return (hdev->fw_sw_major_ver > major) ? 1 : -1;
+
+ if (hdev->fw_sw_minor_ver != minor)
+ return (hdev->fw_sw_minor_ver > minor) ? 1 : -1;
+
+ if (hdev->fw_sw_sub_minor_ver != subminor)
+ return (hdev->fw_sw_sub_minor_ver > subminor) ? 1 : -1;
+
+ return 0;
+}
+
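With the two removed version helpers collapsed into this single three-way compare, a call site can express any ordering against a reference version. A minimal illustration (the version numbers and the flag are made up for the example, not part of this patch):

    /* hypothetical call site: gate a feature on FW >= 1.13.0 */
    if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0)
        feature_supported = true;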
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -345,43 +371,63 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
{
struct cpucp_packet pkt = {};
+ int rc;
pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(value);
- return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ if (rc)
+ dev_err(hdev->dev, "Failed to disable FW's PCI access\n");
+
+ return rc;
}
+/**
+ * hl_fw_send_cpu_message() - send CPU message to the device.
+ *
+ * @hdev: pointer to hl_device structure.
+ * @hw_queue_id: HW queue ID
+ * @msg: raw data of the message/packet
+ * @size: size of @msg in bytes
+ * @timeout_us: timeout in usec to wait for CPU reply on the message
+ * @result: return code reported by FW
+ *
+ * Send a message to the device CPU.
+ *
+ * Return: 0 on success, non-zero for failure.
+ * -ENOMEM: memory allocation failure
+ * -EAGAIN: CPU is disabled (try again when enabled)
+ * -ETIMEDOUT: timeout waiting for FW response
+ * -EIO: protocol error
+ */
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
- u16 len, u32 timeout, u64 *result)
+ u16 size, u32 timeout_us, u64 *result)
{
struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 tmp, expected_ack_val, pi, opcode;
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
struct hl_bd *sent_bd;
- u32 tmp, expected_ack_val, pi, opcode;
- int rc;
+ int rc = 0, fw_rc;
- pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
+ pkt = hl_cpu_accessible_dma_pool_alloc(hdev, size, &pkt_dma_addr);
if (!pkt) {
- dev_err(hdev->dev,
- "Failed to allocate DMA memory for packet to CPU\n");
+ dev_err(hdev->dev, "Failed to allocate DMA memory for packet to CPU\n");
return -ENOMEM;
}
- memcpy(pkt, msg, len);
+ memcpy(pkt, msg, size);
mutex_lock(&hdev->send_cpu_message_lock);
/* CPU-CP messages can be sent during soft-reset */
- if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
- rc = 0;
+ if (hdev->disabled && !hdev->reset_info.in_compute_reset)
goto out;
- }
if (hdev->device_cpu_disabled) {
- rc = -EIO;
+ rc = -EAGAIN;
goto out;
}
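The -EIO to -EAGAIN change above is what the caller-side hunks later in this patch key off: a disabled device CPU is a transient state (e.g. around reset), so callers keep failing the call but stop logging it as an error. The recurring caller pattern, as used throughout firmware_if.c:

    rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt), 0, &result);
    if (rc) {
        if (rc != -EAGAIN)
            dev_err(hdev->dev, "Failed to handle CPU-CP packet, error %d\n", rc);
        return rc;
    }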
@@ -397,7 +443,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
* Which means that we don't need to lock the access to the entire H/W
* queues module when submitting a JOB to the CPU queue.
*/
- hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
+ hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), size, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
@@ -406,7 +452,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
(tmp == expected_ack_val), 1000,
- timeout, true);
+ timeout_us, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
@@ -414,19 +460,27 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* If FW performed reset just before sending it a packet, we will get a timeout.
* This is expected behavior, hence no need for error message.
*/
- if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
+ if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset) {
dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
tmp);
- else
- dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
+ } else {
+ struct hl_bd *bd = queue->kernel_address;
+
+ bd += hl_pi_2_offset(pi);
+
+ dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n"
+ "Pkt info[%u]: dma_addr: 0x%llx, kernel_addr: %p, len:0x%x, ctl: 0x%x, ptr:0x%llx, dram_bd:%u\n",
+ tmp, pi, pkt_dma_addr, (void *)pkt, bd->len, bd->ctl, bd->ptr,
+ queue->dram_bd);
+ }
hdev->device_cpu_disabled = true;
goto out;
}
tmp = le32_to_cpu(pkt->ctl);
- rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
- if (rc) {
+ fw_rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
+ if (fw_rc) {
opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
if (!prop->supports_advanced_cpucp_rc) {
@@ -435,7 +489,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto scrub_descriptor;
}
- switch (rc) {
+ switch (fw_rc) {
case cpucp_packet_invalid:
dev_err(hdev->dev,
"CPU packet %d is not supported by F/W\n", opcode);
@@ -460,7 +514,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* propagate the return code from the f/w to the callers who want to check it */
if (result)
- *result = rc;
+ *result = fw_rc;
rc = -EIO;
@@ -480,7 +534,7 @@ scrub_descriptor:
out:
mutex_unlock(&hdev->send_cpu_message_lock);
- hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
+ hl_cpu_accessible_dma_pool_free(hdev, size, pkt);
return rc;
}
@@ -550,7 +604,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
struct cpucp_packet test_pkt = {};
- u64 result;
+ u64 result = 0;
int rc;
test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
@@ -623,16 +677,14 @@ int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
struct cpucp_packet hb_pkt;
- u64 result;
+ u64 result = 0;
int rc;
memset(&hb_pkt, 0, sizeof(hb_pkt));
- hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
- CPUCP_PKT_CTL_OPCODE_SHIFT);
+ hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << CPUCP_PKT_CTL_OPCODE_SHIFT);
hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
- sizeof(hb_pkt), 0, &result);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, sizeof(hb_pkt), 0, &result);
if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
return -EIO;
@@ -643,6 +695,8 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
rc = -EIO;
}
+ hdev->heartbeat_debug_info.last_pq_heartbeat_ts = ktime_get_real_seconds();
+
return rc;
}
@@ -885,7 +939,7 @@ static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
{
struct cpucp_array_data_packet *pkt;
size_t total_pkt_size, data_size;
- u64 result;
+ u64 result = 0;
int rc;
/* skip sending this info for unsupported ASICs */
@@ -976,11 +1030,10 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
-
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CPU-CP EEPROM packet, error %d\n",
- rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP EEPROM packet, error %d\n", rc);
goto out;
}
@@ -1021,7 +1074,9 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
goto out;
}
@@ -1055,8 +1110,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->rx_throughput = result;
@@ -1070,8 +1126,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->tx_throughput = result;
@@ -1084,8 +1141,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->replay_cnt = (u32) result;
@@ -1105,9 +1163,9 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CpuCP total energy pkt, error %d\n",
- rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CpuCP total energy pkt, error %d\n", rc);
return rc;
}
@@ -1183,7 +1241,8 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
return rc;
}
@@ -1210,7 +1269,8 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
return rc;
}
@@ -1247,8 +1307,9 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
goto out;
}
@@ -1273,7 +1334,8 @@ int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
if (rc) {
- dev_err(hdev->dev,
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
goto out;
}
@@ -1428,7 +1490,7 @@ int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
u32 status = 0, timeout;
- int rc, tries = 1;
+ int rc, tries = 1, fw_err = 0;
bool preboot_still_runs;
/* Need to check two possible scenarios:
@@ -1468,18 +1530,18 @@ retry:
}
}
- if (rc) {
+ /* If we read all FF, then something is totally wrong and there is no point
+ * in reading specific errors
+ */
+ if (status != -1)
+ fw_err = fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
+ pre_fw_load->boot_err1_reg,
+ pre_fw_load->sts_boot_dev_sts0_reg,
+ pre_fw_load->sts_boot_dev_sts1_reg);
+ if (rc || fw_err) {
detect_cpu_boot_status(hdev, status);
- dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
-
- /* If we read all FF, then something is totally wrong, no point
- * of reading specific errors
- */
- if (status != -1)
- fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
- pre_fw_load->boot_err1_reg,
- pre_fw_load->sts_boot_dev_sts0_reg,
- pre_fw_load->sts_boot_dev_sts1_reg);
+ dev_err(hdev->dev, "CPU boot %s (status = %d)\n",
+ fw_err ? "failed due to an error" : "ready timeout", status);
return -EIO;
}
@@ -1750,7 +1812,7 @@ static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
- trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
+ trace_habanalabs_comms_send_cmd(&hdev->pdev->dev, comms_cmd_str_arr[cmd]);
WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
}
@@ -1808,7 +1870,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
- trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
+ trace_habanalabs_comms_wait_status(&hdev->pdev->dev, comms_sts_str_arr[expected_status]);
/* Wait for expected status */
rc = hl_poll_timeout(
@@ -1825,7 +1887,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
return -EIO;
}
- trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
+ trace_habanalabs_comms_wait_status_done(&hdev->pdev->dev,
+ comms_sts_str_arr[expected_status]);
/*
* skip storing FW response for NOOP to preserve the actual desired
@@ -1899,7 +1962,7 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
{
int rc;
- trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
+ trace_habanalabs_comms_protocol_cmd(&hdev->pdev->dev, comms_cmd_str_arr[cmd]);
/* first send clear command to clean former commands */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
@@ -2038,7 +2101,7 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
* note that no alignment/stride address issues here as all structures
* are 64 bit padded.
*/
- data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
+ data_ptr = (u8 *)fw_desc + sizeof(struct comms_msg_header);
data_size = le16_to_cpu(fw_desc->header.size);
data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
@@ -2192,11 +2255,11 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
fw_data_size = le16_to_cpu(fw_desc->header.size);
- temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
+ temp_fw_desc = vzalloc(sizeof(struct comms_msg_header) + fw_data_size);
if (!temp_fw_desc)
return -ENOMEM;
- memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
+ memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_msg_header) + fw_data_size);
rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
(struct lkd_fw_comms_desc *) temp_fw_desc);
@@ -3122,10 +3185,10 @@ long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
-
if (rc) {
- dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
- used_pll_idx, rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
+ used_pll_idx, rc);
return rc;
}
@@ -3149,8 +3212,7 @@ void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
-
- if (rc)
+ if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
used_pll_idx, rc);
}
@@ -3166,9 +3228,9 @@ long hl_fw_get_max_power(struct hl_device *hdev)
pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
-
if (rc) {
- dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
return rc;
}
@@ -3190,8 +3252,7 @@ void hl_fw_set_max_power(struct hl_device *hdev)
pkt.value = cpu_to_le64(hdev->max_power);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
-
- if (rc)
+ if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}
@@ -3217,11 +3278,11 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
pkt.data_max_size = cpu_to_le32(size);
pkt.nonce = cpu_to_le32(nonce);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- timeout, NULL);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), timeout, NULL);
if (rc) {
- dev_err(hdev->dev,
- "Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
goto out;
}
@@ -3263,10 +3324,12 @@ int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
- if (rc)
- dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
- else
+ if (rc) {
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
+ } else {
dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
+ }
*size = (u32)result;
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 48f0f3eea1ef..d94c2ba22a6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -71,7 +71,7 @@ struct hl_fpriv;
#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
-#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
+#define HL_HEARTBEAT_PER_USEC 10000000 /* 10 s */
#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
@@ -90,7 +90,9 @@ struct hl_fpriv;
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
-#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_DBGFS_CFG_ACCESS_HIST_LEN 20
+#define HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC 2 /* 2s */
/* Default value for device reset trigger , an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
@@ -651,6 +653,8 @@ struct hl_hints_range {
* @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
* not supported.
* @reserved_fw_mem_size: size of dram memory reserved for FW.
+ * @fw_event_queue_size: queue size for events from CPU-CP.
+ * A value of 0 means using the default HL_EQ_SIZE_IN_BYTES value.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -700,6 +704,7 @@ struct hl_hints_range {
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
* @support_dynamic_resereved_fw_size: true if we support dynamic reserved size for fw.
+ * @supports_nvme: indicates whether the asic supports NVMe P2P DMA.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -782,6 +787,7 @@ struct asic_fixed_properties {
u32 glbl_err_max_cause_num;
u32 hbw_flush_reg;
u32 reserved_fw_mem_size;
+ u32 fw_event_queue_size;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -819,6 +825,7 @@ struct asic_fixed_properties {
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
u8 support_dynamic_resereved_fw_size;
+ u8 supports_nvme;
};
/**
@@ -902,6 +909,18 @@ struct hl_mem_mgr {
};
/**
+ * struct hl_mem_mgr_fini_stats - describes statistics returned during memory manager teardown.
+ * @n_busy_cb: the number of CB handles that could not be removed
+ * @n_busy_ts: the number of TS handles that could not be removed
+ * @n_busy_other: the number of handles of any other type that could not be removed
+ */
+struct hl_mem_mgr_fini_stats {
+ u32 n_busy_cb;
+ u32 n_busy_ts;
+ u32 n_busy_other;
+};
+
+/**
* struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
* @topic: string identifier used for logging
* @mem_id: memory type identifier, embedded in the handle and used to identify
@@ -1229,6 +1248,7 @@ struct hl_user_pending_interrupt {
* @hdev: pointer to the device structure
* @kernel_address: holds the queue's kernel virtual address
* @bus_address: holds the queue's DMA address
+ * @size: the event queue size
* @ci: ci inside the queue
* @prev_eqe_index: the index of the previous event queue entry. The index of
* the current entry's index must be +1 of the previous one.
@@ -1240,6 +1260,7 @@ struct hl_eq {
struct hl_device *hdev;
void *kernel_address;
dma_addr_t bus_address;
+ u32 size;
u32 ci;
u32 prev_eqe_index;
bool check_eqe_index;
@@ -1268,15 +1289,18 @@ struct hl_dec {
* @ASIC_GAUDI2: Gaudi2 device.
* @ASIC_GAUDI2B: Gaudi2B device.
* @ASIC_GAUDI2C: Gaudi2C device.
+ * @ASIC_GAUDI2D: Gaudi2D device.
*/
enum hl_asic_type {
ASIC_INVALID,
+
ASIC_GOYA,
ASIC_GAUDI,
ASIC_GAUDI_SEC,
ASIC_GAUDI2,
ASIC_GAUDI2B,
ASIC_GAUDI2C,
+ ASIC_GAUDI2D,
};
struct hl_cs_parser;
@@ -2254,6 +2278,9 @@ struct hl_vm {
u8 init_done;
};
+#ifdef CONFIG_HL_HLDIO
+#include "hldio.h"
+#endif
/*
* DEBUG, PROFILING STRUCTURE
@@ -2324,7 +2351,6 @@ struct hl_fpriv {
struct mutex ctx_lock;
};
-
/*
* DebugFS
*/
@@ -2352,6 +2378,7 @@ struct hl_debugfs_entry {
struct hl_dbg_device_entry *dev_entry;
};
+
/**
* struct hl_dbg_device_entry - ASIC specific debugfs manager.
* @root: root dentry.
@@ -2383,6 +2410,7 @@ struct hl_debugfs_entry {
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
* @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
+ * @dio_stats: Direct I/O statistics
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -2414,6 +2442,35 @@ struct hl_dbg_device_entry {
u8 i2c_addr;
u8 i2c_reg;
u8 i2c_len;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio_stats dio_stats;
+#endif
+};
+
+/**
+ * struct hl_debugfs_cfg_access_entry - single debugfs config access object, member of
+ * hl_debugfs_cfg_access.
+ * @seconds_since_epoch: seconds since January 1, 1970, used for time comparisons.
+ * @debugfs_type: the debugfs operation requested, can be READ32, WRITE32, READ64 or WRITE64.
+ * @addr: the requested address to access.
+ * @valid: if set, this entry has valid data for dumping at interrupt time.
+ */
+struct hl_debugfs_cfg_access_entry {
+ ktime_t seconds_since_epoch;
+ enum debugfs_access_type debugfs_type;
+ u64 addr;
+ bool valid;
+};
+
+/**
+ * struct hl_debugfs_cfg_access - saves debugfs config region access requests history.
+ * @cfg_access_list: list of objects describing config region access requests.
+ * @head: next valid index in cfg_access_list at which to add a new entry.
+ * @lock: protects @head and the entries.
+ */
+struct hl_debugfs_cfg_access {
+ struct hl_debugfs_cfg_access_entry cfg_access_list[HL_DBGFS_CFG_ACCESS_HIST_LEN];
+ u32 head;
+ spinlock_t lock; /* protects head and entries */
};
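The structure is a fixed-size ring: @head always points at the slot to overwrite next, so after wrapping it holds the last HL_DBGFS_CFG_ACCESS_HIST_LEN accesses. The recording helper is not part of this hunk; a minimal sketch of how an entry would be pushed, with the function name assumed for illustration:

    /* hypothetical recorder, not part of this patch */
    static void hl_debugfs_cfg_access_record(struct hl_device *hdev,
                                             enum debugfs_access_type type, u64 addr)
    {
        struct hl_debugfs_cfg_access *hist = &hdev->debugfs_cfg_accesses;
        struct hl_debugfs_cfg_access_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&hist->lock, flags);
        e = &hist->cfg_access_list[hist->head];
        e->seconds_since_epoch = ktime_get_real_seconds();
        e->debugfs_type = type;
        e->addr = addr;
        e->valid = true;
        hist->head = (hist->head + 1) % HL_DBGFS_CFG_ACCESS_HIST_LEN;
        spin_unlock_irqrestore(&hist->lock, flags);
    }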
/**
@@ -2709,11 +2766,16 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
* updated directly by the device. If false, the host memory being polled will
* be updated by host CPU. Required so host knows whether or not the memory
* might need to be byte-swapped before returning value to caller.
+ *
+ * On the first 4 polling iterations the macro sleeps for a short period of
+ * time that gradually increases, reaching sleep_us on the fifth iteration.
*/
#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
mem_written_by_device) \
({ \
+ u64 __sleep_step_us; \
ktime_t __timeout; \
+ u8 __step = 8; \
\
__timeout = ktime_add_us(ktime_get(), timeout_us); \
might_sleep_if(sleep_us); \
@@ -2731,8 +2793,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(val) = le32_to_cpu(*(__le32 *) &(val)); \
break; \
} \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ __sleep_step_us = sleep_us >> __step; \
+ if (__sleep_step_us) \
+ usleep_range((__sleep_step_us >> 2) + 1, __sleep_step_us); \
+ __step >>= 1; \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
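Concretely, with sleep_us = 1000 the successive sleep caps are 1000 >> 8 = 3, then 62, 250 and 500, reaching the full 1000 us from the fifth iteration onwards; a zero step (very small sleep_us) skips the sleep entirely and busy-polls. A standalone sketch of the same stepping, for illustration only:

    /* illustration of the macro's sleep back-off, not driver code */
    u64 sleep_us = 1000, step_us;
    u8 step = 8;
    int i;

    for (i = 0; i < 6; i++) {
        step_us = sleep_us >> step;     /* 3, 62, 250, 500, 1000, 1000 */
        if (step_us)
            usleep_range((step_us >> 2) + 1, step_us);
        step >>= 1;
    }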
@@ -3175,6 +3239,21 @@ struct hl_reset_info {
};
/**
+ * struct eq_heartbeat_debug_info - stores debug info to be used upon heartbeat failure.
+ * @last_pq_heartbeat_ts: timestamp of the last test packet that was sent to FW.
+ * This packet is the trigger in FW to send the EQ heartbeat event.
+ * @last_eq_heartbeat_ts: timestamp of the last EQ heartbeat event that was received from FW.
+ * @heartbeat_event_counter: number of heartbeat events received.
+ * @cpu_queue_id: used to read the queue pi/ci
+ */
+struct eq_heartbeat_debug_info {
+ time64_t last_pq_heartbeat_ts;
+ time64_t last_eq_heartbeat_ts;
+ u32 heartbeat_event_counter;
+ u32 cpu_queue_id;
+};
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar_phys: array of available PCIe bars physical addresses.
@@ -3239,6 +3318,7 @@ struct hl_reset_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
+ * @debugfs_cfg_accesses: list of last debugfs config region accesses.
* @cb_pool: list of pre allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
@@ -3262,6 +3342,8 @@ struct hl_reset_info {
* @clk_throttling: holds information about current/previous clock throttling events
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
+ * @heartbeat_debug_info: counters used to debug heartbeat failures.
+ * @hldio: describes habanalabs direct storage interaction interface.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3314,6 +3396,7 @@ struct hl_reset_info {
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
* @disabled: is device disabled.
+ * @cpld_shutdown: is cpld shutdown.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
@@ -3418,6 +3501,7 @@ struct hl_device {
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
+ struct hl_debugfs_cfg_access debugfs_cfg_accesses;
struct list_head cb_pool;
spinlock_t cb_pool_lock;
@@ -3452,6 +3536,10 @@ struct hl_device {
struct hl_reset_info reset_info;
+ struct eq_heartbeat_debug_info heartbeat_debug_info;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio hldio;
+#endif
cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr;
@@ -3487,6 +3575,7 @@ struct hl_device {
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
+ u8 cpld_shutdown;
u8 late_init_done;
u8 hwmon_initialized;
u8 reset_on_lockup;
@@ -3596,25 +3685,6 @@ struct hl_ioctl_desc {
hl_ioctl_t *func;
};
-static inline bool hl_is_fw_sw_ver_below(struct hl_device *hdev, u32 fw_sw_major, u32 fw_sw_minor)
-{
- if (hdev->fw_sw_major_ver < fw_sw_major)
- return true;
- if (hdev->fw_sw_major_ver > fw_sw_major)
- return false;
- if (hdev->fw_sw_minor_ver < fw_sw_minor)
- return true;
- return false;
-}
-
-static inline bool hl_is_fw_sw_ver_equal_or_greater(struct hl_device *hdev, u32 fw_sw_major,
- u32 fw_sw_minor)
-{
- return (hdev->fw_sw_major_ver > fw_sw_major ||
- (hdev->fw_sw_major_ver == fw_sw_major &&
- hdev->fw_sw_minor_ver >= fw_sw_minor));
-}
-
/*
* Kernel module functions that can be accessed by entire module
*/
@@ -3740,6 +3810,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
+void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
@@ -3919,6 +3990,7 @@ void hl_mmu_dr_flush(struct hl_ctx *ctx);
int hl_mmu_dr_init(struct hl_device *hdev);
void hl_mmu_dr_fini(struct hl_device *hdev);
+int hl_fw_version_cmp(struct hl_device *hdev, u32 major, u32 minor, u32 subminor);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value);
@@ -4033,7 +4105,7 @@ char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
-void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats);
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args);
@@ -4059,6 +4131,9 @@ void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_coun
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);
void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
+void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
+void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
@@ -4080,6 +4155,7 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length);
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev);
#else
@@ -4155,6 +4231,10 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+}
+
#endif
/* Security */
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index e542fd40e16c..0035748f3228 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -101,7 +101,6 @@ static const struct drm_driver hl_driver = {
.major = LINUX_VERSION_MAJOR,
.minor = LINUX_VERSION_PATCHLEVEL,
.patchlevel = LINUX_VERSION_SUBLEVEL,
- .date = "20190505",
.fops = &hl_fops,
.open = hl_device_open,
@@ -144,6 +143,9 @@ static enum hl_asic_type get_asic_type(struct hl_device *hdev)
case REV_ID_C:
asic_type = ASIC_GAUDI2C;
break;
+ case REV_ID_D:
+ asic_type = ASIC_GAUDI2D;
+ break;
default:
break;
}
@@ -260,7 +262,7 @@ int hl_device_open(struct drm_device *ddev, struct drm_file *file_priv)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
- hl_mem_mgr_fini(&hpriv->mem_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr, NULL);
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
mutex_destroy(&hpriv->ctx_lock);
@@ -359,8 +361,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
* a different default timeout for Gaudi
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
- hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
- MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
hdev->reset_upon_device_release = 0;
break;
@@ -385,7 +386,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
if (tmp_timeout)
- hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(tmp_timeout);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 1dd6e23172ca..fdfdabc85e54 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -17,8 +17,6 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/msr.h>
-
/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);
@@ -963,6 +961,12 @@ static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
+ case HL_GET_ERR_COUNTERS_CMD:
+ need_input_buff = true;
+ break;
+ case HL_GET_P_STATE:
+ need_input_buff = false;
+ break;
default:
return -EINVAL;
}
@@ -1279,13 +1283,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar
retcode = -EFAULT;
out_err:
- if (retcode) {
- char task_comm[TASK_COMM_LEN];
-
+ if (retcode)
dev_dbg_ratelimited(dev,
"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
- }
+ task_pid_nr(current), current->comm, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
@@ -1308,11 +1309,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
} else {
- char task_comm[TASK_COMM_LEN];
-
dev_dbg_ratelimited(hdev->dev_ctrl,
"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+ task_pid_nr(current), current->comm, cmd, nr);
return -ENOTTY;
}
diff --git a/drivers/accel/habanalabs/common/hldio.c b/drivers/accel/habanalabs/common/hldio.c
new file mode 100644
index 000000000000..083ae5610875
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2024 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "hldio.h"
+#include <generated/uapi/linux/version.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+/*
+ * NVMe Direct I/O implementation for habanalabs driver
+ *
+ * ASSUMPTIONS
+ * ===========
+ * 1. No IOMMU (well, technically it can work with an IOMMU, but it is almost useless).
+ * 2. Only READ operations (can be extended in the future).
+ * 3. No sparse files (can be overcome in the future).
+ * 4. Kernel version >= 6.9.
+ * 5. Requiring page alignment is OK (I don't see a solution to this one right
+ *    now; how do we read partial pages?)
+ * 6. Kernel compiled with CONFIG_PCI_P2PDMA. This requires a CUSTOM kernel.
+ *    Theoretically I have a slight idea of how this could be solved, but it
+ *    is probably unacceptable for the upstream. Also it may not work in the end.
+ * 7. Either make sure our cards and disks are under the same PCI bridge, or
+ * compile a custom kernel to hack around this.
+ */
+
+#define IO_STABILIZE_TIMEOUT 10000000 /* 10 seconds in microseconds */
+
+/*
+ * This struct contains all the useful data I could milk out of the file handle
+ * provided by the user.
+ * @TODO: right now it is retrieved on each IO, but can be done once with some
+ * dedicated IOCTL, call it for example HL_REGISTER_HANDLE.
+ */
+struct hl_dio_fd {
+ /* Back pointer in case we need it in async completion */
+ struct hl_ctx *ctx;
+ /* Associated fd struct */
+ struct file *filp;
+};
+
+/*
+ * This is a single IO descriptor
+ */
+struct hl_direct_io {
+ struct hl_dio_fd f;
+ struct kiocb kio;
+ struct bio_vec *bv;
+ struct iov_iter iter;
+ u64 device_va;
+ u64 off_bytes;
+ u64 len_bytes;
+ u32 type;
+};
+
+bool hl_device_supports_nvme(struct hl_device *hdev)
+{
+ return hdev->asic_prop.supports_nvme;
+}
+
+static int hl_dio_fd_register(struct hl_ctx *ctx, int fd, struct hl_dio_fd *f)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct block_device *bd;
+ struct super_block *sb;
+ struct inode *inode;
+ struct gendisk *gd;
+ struct device *disk_dev;
+ int rc;
+
+ f->filp = fget(fd);
+ if (!f->filp) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (!(f->filp->f_flags & O_DIRECT)) {
+ dev_err(hdev->dev, "file is not in the direct mode\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ if (!f->filp->f_op->read_iter) {
+ dev_err(hdev->dev, "read iter is not supported, need to fall back to legacy\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ inode = file_inode(f->filp);
+ sb = inode->i_sb;
+ bd = sb->s_bdev;
+
+ if (inode->i_blocks << sb->s_blocksize_bits < i_size_read(inode)) {
+ dev_err(hdev->dev, "sparse files are not currently supported\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ /* check the block device before dereferencing it to fetch the disk */
+ if (!bd || !bd->bd_disk) {
+ dev_err(hdev->dev, "invalid block device\n");
+ rc = -ENODEV;
+ goto fput;
+ }
+ gd = bd->bd_disk;
+ /* Get the underlying device from the block device */
+ disk_dev = disk_to_dev(gd);
+ if (!dma_pci_p2pdma_supported(disk_dev)) {
+ dev_err(hdev->dev, "device does not support PCI P2P DMA\n");
+ rc = -EOPNOTSUPP;
+ goto fput;
+ }
+
+ /*
+ * @TODO: Maybe we need additional checks here
+ */
+
+ f->ctx = ctx;
+ rc = 0;
+
+ goto out;
+fput:
+ fput(f->filp);
+out:
+ return rc;
+}
+
+static void hl_dio_fd_unregister(struct hl_dio_fd *f)
+{
+ fput(f->filp);
+}
+
+static long hl_dio_count_io(struct hl_device *hdev)
+{
+ s64 sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(*hdev->hldio.inflight_ios, i);
+
+ return sum;
+}
+
+static bool hl_dio_get_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->hldio.io_enabled) {
+ this_cpu_inc(*hdev->hldio.inflight_ios);
+
+ /* re-check after the inc: a concurrent hl_dio_stop() either observes
+ * our counter increment or we observe io_enabled == false
+ */
+ if (!hdev->hldio.io_enabled) {
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+ return false;
+ }
+
+ hl_ctx_get(ctx);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void hl_dio_put_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ hl_ctx_put(ctx);
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+}
+
+static void hl_dio_set_io_enabled(struct hl_device *hdev, bool enabled)
+{
+ hdev->hldio.io_enabled = enabled;
+}
+
+static bool hl_dio_validate_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ if ((u64)io->device_va & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "device address must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->len_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO length must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->off_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO offset must be 4K aligned\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct page *hl_dio_va2page(struct hl_device *hdev, struct hl_ctx *ctx, u64 device_va)
+{
+ struct hl_dio *hldio = &hdev->hldio;
+ u64 device_pa;
+ int rc, i;
+
+ rc = hl_mmu_va_to_pa(ctx, device_va, &device_pa);
+ if (rc) {
+ dev_err(hdev->dev, "device virtual address translation error: %#llx (%d)",
+ device_va, rc);
+ return NULL;
+ }
+
+ for (i = 0 ; i < hldio->np2prs ; ++i) {
+ if (device_pa >= hldio->p2prs[i].device_pa &&
+ device_pa < hldio->p2prs[i].device_pa + hldio->p2prs[i].size)
+ return hldio->p2prs[i].p2ppages[(device_pa - hldio->p2prs[i].device_pa) >>
+ PAGE_SHIFT];
+ }
+
+ return NULL;
+}
+
+static ssize_t hl_direct_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ u64 npages, device_va;
+ ssize_t rc;
+ int i;
+
+ if (!hl_dio_validate_io(hdev, io))
+ return -EINVAL;
+
+ if (!hl_dio_get_iopath(io->f.ctx)) {
+ dev_info(hdev->dev, "can't schedule a new IO, IO is disabled\n");
+ return -ESHUTDOWN;
+ }
+
+ init_sync_kiocb(&io->kio, io->f.filp);
+ io->kio.ki_pos = io->off_bytes;
+
+ npages = (io->len_bytes >> PAGE_SHIFT);
+
+ /* @TODO: this can be implemented smarter, vmalloc in iopath is not
+ * ideal. Maybe some variation of genpool. Number of pages may differ
+ * greatly, so maybe even use pools of different sizes and choose the
+ * closest one.
+ */
+ io->bv = vzalloc(npages * sizeof(struct bio_vec));
+ if (!io->bv) {
+ /* drop the iopath reference taken above; vfree(NULL) is a no-op */
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
+ io->bv[i].bv_page = hl_dio_va2page(hdev, io->f.ctx, device_va);
+ if (!io->bv[i].bv_page) {
+ dev_err(hdev->dev, "error getting page struct for device va %#llx",
+ device_va);
+ rc = -EFAULT;
+ goto cleanup;
+ }
+ io->bv[i].bv_offset = 0;
+ io->bv[i].bv_len = PAGE_SIZE;
+ }
+
+ /* the bio_vec array describes all npages segments */
+ iov_iter_bvec(&io->iter, io->type, io->bv, npages, io->len_bytes);
+ if (io->f.filp->f_op && io->f.filp->f_op->read_iter)
+ rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
+ else
+ rc = -EINVAL;
+
+cleanup:
+ vfree(io->bv);
+ hl_dio_put_iopath(io->f.ctx);
+
+ dev_dbg(hdev->dev, "IO ended with %ld\n", rc);
+
+ return rc;
+}
+
+/*
+ * @TODO: This function can be used as a callback for io completion under
+ * kio->ki_complete in order to implement async IO.
+ * Note that on more recent kernels there is no ret2.
+ */
+__maybe_unused static void hl_direct_io_complete(struct kiocb *kio, long ret, long ret2)
+{
+ struct hl_direct_io *io = container_of(kio, struct hl_direct_io, kio);
+
+ dev_dbg(io->f.ctx->hdev->dev, "IO completed with %ld\n", ret);
+
+ /* Do something to copy result to user / notify completion */
+
+ hl_dio_put_iopath(io->f.ctx);
+
+ hl_dio_fd_unregister(&io->f);
+}
+
+/*
+ * DMA disk to ASIC, wait for results. Must be invoked from the user context
+ */
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{
+ struct hl_direct_io *io;
+ ssize_t rc;
+
+ dev_dbg(hdev->dev, "SSD2HL fd=%d va=%#llx len=%#lx\n", fd, device_va, len_bytes);
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *io = (struct hl_direct_io){
+ .device_va = device_va,
+ .len_bytes = len_bytes,
+ .off_bytes = off_bytes,
+ .type = READ,
+ };
+
+ rc = hl_dio_fd_register(ctx, fd, &io->f);
+ if (rc)
+ goto kfree_io;
+
+ rc = hl_direct_io(hdev, io);
+ if (rc >= 0) {
+ *len_read = rc;
+ rc = 0;
+ }
+
+ /* This shall be called only in the case of a sync IO */
+ hl_dio_fd_unregister(&io->f);
+kfree_io:
+ kfree(io);
+out:
+ return rc;
+}
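A caller-side sketch of a synchronous read into device memory (the fd must come from a file opened with O_DIRECT; all values here are illustrative, not from this patch):

    /* hypothetical caller, e.g. an IOCTL handler */
    size_t len_read = 0;
    int rc;

    rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, 0 /* file offset */,
                       SZ_2M /* page-aligned length */, &len_read);
    if (!rc)
        dev_dbg(hdev->dev, "read %zu bytes into device VA %#llx\n",
                len_read, device_va);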
+
+static void hl_p2p_region_fini(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ if (p2pr->p2ppages) {
+ vfree(p2pr->p2ppages);
+ p2pr->p2ppages = NULL;
+ }
+
+ if (p2pr->p2pmem) {
+ dev_dbg(hdev->dev, "freeing P2P mem from %p, size=%#llx\n",
+ p2pr->p2pmem, p2pr->size);
+ pci_free_p2pmem(hdev->pdev, p2pr->p2pmem, p2pr->size);
+ p2pr->p2pmem = NULL;
+ }
+}
+
+void hl_p2p_region_fini_all(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < hdev->hldio.np2prs ; ++i)
+ hl_p2p_region_fini(hdev, &hdev->hldio.p2prs[i]);
+
+ kvfree(hdev->hldio.p2prs);
+ hdev->hldio.p2prs = NULL;
+ hdev->hldio.np2prs = 0;
+}
+
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ void *addr;
+ int rc, i;
+
+ /* Start by publishing our p2p memory */
+ rc = pci_p2pdma_add_resource(hdev->pdev, p2pr->bar, p2pr->size, p2pr->bar_offset);
+ if (rc) {
+ dev_err(hdev->dev, "error adding p2p resource: %d\n", rc);
+ goto err;
+ }
+
+ /* Alloc all p2p mem */
+ p2pr->p2pmem = pci_alloc_p2pmem(hdev->pdev, p2pr->size);
+ if (!p2pr->p2pmem) {
+ dev_err(hdev->dev, "error allocating p2p memory\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ p2pr->p2ppages = vmalloc((p2pr->size >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!p2pr->p2ppages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
+ p2pr->p2ppages[i] = virt_to_page(addr);
+ if (!p2pr->p2ppages[i]) {
+ rc = -EFAULT;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ hl_p2p_region_fini(hdev, p2pr);
+ return rc;
+}
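At init time each exported BAR window would be described by one region and published before IO is enabled. A condensed bring-up sketch with made-up field values (the real BAR number, offset, size and device physical address are ASIC specific and not part of this hunk):

    /* hypothetical bring-up, values illustrative */
    struct hl_p2p_region *p2pr = &hdev->hldio.p2prs[0];

    p2pr->bar = 4;
    p2pr->bar_offset = 0;
    p2pr->size = SZ_256M;
    p2pr->device_pa = 0x1000000000ull;

    if (!hl_p2p_region_init(hdev, p2pr))
        hl_dio_start(hdev); /* allocate inflight counter and enable IO */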
+
+int hl_dio_start(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "initializing HLDIO\n");
+
+ /* Initialize the IO counter and enable IO */
+ hdev->hldio.inflight_ios = alloc_percpu(s64);
+ if (!hdev->hldio.inflight_ios)
+ return -ENOMEM;
+
+ hl_dio_set_io_enabled(hdev, true);
+
+ return 0;
+}
+
+void hl_dio_stop(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "deinitializing HLDIO\n");
+
+ if (hdev->hldio.io_enabled) {
+ /* Wait for all the IO to finish */
+ hl_dio_set_io_enabled(hdev, false);
+ hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000, IO_STABILIZE_TIMEOUT);
+ }
+
+ if (hdev->hldio.inflight_ios) {
+ free_percpu(hdev->hldio.inflight_ios);
+ hdev->hldio.inflight_ios = NULL;
+ }
+}
diff --git a/drivers/accel/habanalabs/common/hldio.h b/drivers/accel/habanalabs/common/hldio.h
new file mode 100644
index 000000000000..2874388f2851
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hldio.h - NVMe Direct I/O (HLDIO) infrastructure for Habana Labs Driver
+ *
+ * This feature requires specific hardware setup and must not be built
+ * under COMPILE_TEST.
+ */
+
+#ifndef __HL_HLDIO_H__
+#define __HL_HLDIO_H__
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/ktime.h> /* ktime functions */
+#include <linux/delay.h> /* usleep_range */
+#include <linux/kernel.h> /* might_sleep_if */
+#include <linux/errno.h> /* error codes */
+
+/* Forward declarations */
+struct hl_device;
+struct file;
+
+/* Enable only if Kconfig selected */
+#ifdef CONFIG_HL_HLDIO
+/**
+ * struct hl_p2p_region - describes a single P2P memory region
+ * @p2ppages: array of page structs for the P2P memory
+ * @p2pmem: virtual address of the P2P memory region
+ * @device_pa: physical address on the device
+ * @bar_offset: offset within the BAR
+ * @size: size of the region in bytes
+ * @bar: BAR number containing this region
+ */
+struct hl_p2p_region {
+ struct page **p2ppages;
+ void *p2pmem;
+ u64 device_pa;
+ u64 bar_offset;
+ u64 size;
+ int bar;
+};
+
+/**
+ * struct hl_dio_stats - Direct I/O statistics
+ * @total_ops: total number of operations attempted
+ * @successful_ops: number of successful operations
+ * @failed_ops: number of failed operations
+ * @bytes_transferred: total bytes successfully transferred
+ * @last_len_read: length of the last read operation
+ */
+struct hl_dio_stats {
+ u64 total_ops;
+ u64 successful_ops;
+ u64 failed_ops;
+ u64 bytes_transferred;
+ size_t last_len_read;
+};
+
+/**
+ * struct hl_dio - describes habanalabs direct storage interaction interface
+ * @p2prs: array of p2p regions
+ * @inflight_ios: percpu counter for inflight ios
+ * @np2prs: number of elements in p2prs
+ * @io_enabled: 1 if IO is enabled, 0 otherwise
+ */
+struct hl_dio {
+ struct hl_p2p_region *p2prs;
+ s64 __percpu *inflight_ios;
+ u8 np2prs;
+ u8 io_enabled;
+};
+
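+/*
+ * Note on @inflight_ios: it is a per-CPU counter, so a sum across all CPUs
+ * is only stable after @io_enabled has been cleared and no new IO can be
+ * started; hl_dio_stop() relies on this when draining in-flight IO.
+ */
+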
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read);
+void hl_p2p_region_fini_all(struct hl_device *hdev);
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr);
+int hl_dio_start(struct hl_device *hdev);
+void hl_dio_stop(struct hl_device *hdev);
+
+/* Init/teardown */
+int hl_hldio_init(struct hl_device *hdev);
+void hl_hldio_fini(struct hl_device *hdev);
+
+/* File operations */
+long hl_hldio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/* DebugFS hooks */
+#ifdef CONFIG_DEBUG_FS
+void hl_hldio_debugfs_init(struct hl_device *hdev);
+void hl_hldio_debugfs_fini(struct hl_device *hdev);
+#else
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+#endif
+
+#else /* !CONFIG_HL_HLDIO */
+
+struct hl_p2p_region;
+/* Stubs when HLDIO is disabled */
+static inline int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{ return -EOPNOTSUPP; }
+static inline void hl_p2p_region_fini_all(struct hl_device *hdev) {}
+static inline int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{ return -EOPNOTSUPP; }
+static inline int hl_dio_start(struct hl_device *hdev) { return -EOPNOTSUPP; }
+static inline void hl_dio_stop(struct hl_device *hdev) {}
+
+static inline int hl_hldio_init(struct hl_device *hdev) { return 0; }
+static inline void hl_hldio_fini(struct hl_device *hdev) { }
+static inline long hl_hldio_ioctl(struct file *f, unsigned int c,
+ unsigned long a)
+{ return -ENOTTY; }
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+
+#endif /* CONFIG_HL_HLDIO */
+
+/* Simplified polling macro for HLDIO (no simulator support) */
+#define hl_poll_timeout_condition(hdev, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ (void)(hdev); /* keep signature consistent, hdev unused */ \
+ for (;;) { \
+ mb(); /* ensure ordering of memory operations */ \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
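+/*
+ * Typical use, mirroring the drain loop in hl_dio_stop():
+ *
+ *    rc = hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev),
+ *                                   1000, IO_STABILIZE_TIMEOUT);
+ *    if (rc == -ETIMEDOUT)
+ *        dev_warn(hdev->dev, "IO did not drain in time\n");
+ */
+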
+#ifdef CONFIG_HL_HLDIO
+bool hl_device_supports_nvme(struct hl_device *hdev);
+#else
+static inline bool hl_device_supports_nvme(struct hl_device *hdev) { return false; }
+#endif
+
+#endif /* __HL_HLDIO_H__ */
diff --git a/drivers/accel/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 36b951b5f503..52d1e6bf10dc 100644
--- a/drivers/accel/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
@@ -585,9 +585,10 @@ int hl_get_temperature(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "Failed to get temperature from sensor %d, error %d\n",
- sensor_index, rc);
+ if (rc != -EAGAIN)
+ dev_err_ratelimited(hdev->dev,
+ "Failed to get temperature from sensor %d, error %d\n",
+ sensor_index, rc);
*value = 0;
}
@@ -610,8 +611,7 @@ int hl_set_temperature(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
-
- if (rc)
+ if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set temperature of sensor %d, error %d\n",
sensor_index, rc);
@@ -639,9 +639,10 @@ int hl_get_voltage(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "Failed to get voltage from sensor %d, error %d\n",
- sensor_index, rc);
+ if (rc != -EAGAIN)
+ dev_err_ratelimited(hdev->dev,
+ "Failed to get voltage from sensor %d, error %d\n",
+ sensor_index, rc);
*value = 0;
}
@@ -668,9 +669,10 @@ int hl_get_current(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "Failed to get current from sensor %d, error %d\n",
- sensor_index, rc);
+ if (rc != -EAGAIN)
+ dev_err_ratelimited(hdev->dev,
+ "Failed to get current from sensor %d, error %d\n",
+ sensor_index, rc);
*value = 0;
}
@@ -697,9 +699,10 @@ int hl_get_fan_speed(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "Failed to get fan speed from sensor %d, error %d\n",
- sensor_index, rc);
+ if (rc != -EAGAIN)
+ dev_err_ratelimited(hdev->dev,
+ "Failed to get fan speed from sensor %d, error %d\n",
+ sensor_index, rc);
*value = 0;
}
@@ -726,9 +729,10 @@ int hl_get_pwm_info(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "Failed to get pwm info from sensor %d, error %d\n",
- sensor_index, rc);
+ if (rc != -EAGAIN)
+ dev_err_ratelimited(hdev->dev,
+ "Failed to get pwm info from sensor %d, error %d\n",
+ sensor_index, rc);
*value = 0;
}
@@ -751,8 +755,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
-
- if (rc)
+ if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set pwm info to sensor %d, error %d\n",
sensor_index, rc);
@@ -774,8 +777,7 @@ int hl_set_voltage(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
-
- if (rc)
+ if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set voltage of sensor %d, error %d\n",
sensor_index, rc);
@@ -797,10 +799,8 @@ int hl_set_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
- rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, NULL);
-
- if (rc)
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set current of sensor %d, error %d\n",
sensor_index, rc);
@@ -830,8 +830,7 @@ int hl_set_power(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
-
- if (rc)
+ if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set power of sensor %d, error %d\n",
sensor_index, rc);
@@ -859,9 +858,10 @@ int hl_get_power(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "Failed to get power of sensor %d, error %d\n",
- sensor_index, rc);
+ if (rc != -EAGAIN)
+ dev_err_ratelimited(hdev->dev,
+ "Failed to get power of sensor %d, error %d\n",
+ sensor_index, rc);
*value = 0;
}
diff --git a/drivers/accel/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c
index 978b7f4d5eeb..7c9f2f6a2870 100644
--- a/drivers/accel/habanalabs/common/irq.c
+++ b/drivers/accel/habanalabs/common/irq.c
@@ -652,14 +652,16 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
*/
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
+ u32 size = hdev->asic_prop.fw_event_queue_size ? : HL_EQ_SIZE_IN_BYTES;
void *p;
- p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
+ p = hl_cpu_accessible_dma_pool_alloc(hdev, size, &q->bus_address);
if (!p)
return -ENOMEM;
q->hdev = hdev;
q->kernel_address = p;
+ q->size = size;
q->ci = 0;
q->prev_eqe_index = 0;
@@ -678,7 +680,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
flush_workqueue(hdev->eq_wq);
- hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, q->size, q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
@@ -693,5 +695,30 @@ void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
* when the device is operational again
*/
- memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
+ memset(q->kernel_address, 0, q->size);
+}
+
+void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q)
+{
+ u32 eq_length, eqe_size, ctl, ready, mode, type, index;
+ struct hl_eq_header *hdr;
+ u8 *ptr;
+ int i;
+
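+ /* All EQ entries share the same size, so derive it from the total queue size */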
+ eq_length = HL_EQ_LENGTH;
+ eqe_size = q->size / HL_EQ_LENGTH;
+
+ dev_info(hdev->dev, "Contents of EQ entry headers:\n");
+
+ for (i = 0, ptr = q->kernel_address ; i < eq_length ; ++i, ptr += eqe_size) {
+ hdr = (struct hl_eq_header *) ptr;
+ ctl = le32_to_cpu(hdr->ctl);
+ ready = FIELD_GET(EQ_CTL_READY_MASK, ctl);
+ mode = FIELD_GET(EQ_CTL_EVENT_MODE_MASK, ctl);
+ type = FIELD_GET(EQ_CTL_EVENT_TYPE_MASK, ctl);
+ index = FIELD_GET(EQ_CTL_INDEX_MASK, ctl);
+
+ dev_info(hdev->dev, "%02u: %#010x [ready: %u, mode %u, type %04u, index %05u]\n",
+ i, ctl, ready, mode, type, index);
+ }
}
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 3348ad12c237..633db4bff46f 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -14,7 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pci-p2pdma.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define HL_MMU_DEBUG 0
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
- if (!hl_dmabuf)
- return;
-
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1840,7 +1837,12 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
- /* Paired with get_file() in export_dmabuf() */
+ /*
+ * Paired with get_file() in export_dmabuf().
+ * 'ctx' can be still used here to get the file pointer, even after hl_ctx_put() was called,
+ * because releasing the compute device file involves another reference decrement, and it
+ * would be possible only after calling fput().
+ */
fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
@@ -1859,7 +1861,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
- int rc, fd;
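+ /* Scope-based fd guard: released automatically on early return until take_fd() claims it */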
+ CLASS(get_unused_fd, fd)(flags);
+
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+ return fd;
+ }
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1879,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
return PTR_ERR(hl_dmabuf->dmabuf);
}
- fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
- if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
- rc = fd;
- goto err_dma_buf_put;
- }
-
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1890,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
+ fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
-
-err_dma_buf_put:
- hl_dmabuf->dmabuf->priv = NULL;
- dma_buf_put(hl_dmabuf->dmabuf);
- return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
@@ -2341,7 +2337,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc < 0)
goto destroy_pages;
npages = rc;
- rc = -EFAULT;
+ rc = -ENOMEM;
goto put_pages;
}
userptr->npages = npages;
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index c4d84df355b0..4401beb99e42 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -259,13 +259,8 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
goto put_mem;
}
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
- user_mem_size)) {
-#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
-#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
@@ -318,28 +313,61 @@ void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
idr_init(&mmg->handles);
}
+static void hl_mem_mgr_fini_stats_reset(struct hl_mem_mgr_fini_stats *stats)
+{
+ if (!stats)
+ return;
+
+ memset(stats, 0, sizeof(*stats));
+}
+
+static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)
+{
+ if (!stats)
+ return;
+
+ switch (mem_id) {
+ case HL_MMAP_TYPE_CB:
+ ++stats->n_busy_cb;
+ break;
+ case HL_MMAP_TYPE_TS_BUFF:
+ ++stats->n_busy_ts;
+ break;
+ default:
+ /* we currently store only CB/TS, so this shouldn't happen */
+ ++stats->n_busy_other;
+ }
+}
+
/**
* hl_mem_mgr_fini - release unified memory manager
*
* @mmg: parent unified memory manager
+ * @stats: if non-NULL, returns counters of handles that could not be released.
*
* Release the unified memory manager. Shall be called from an interrupt context.
*/
-void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats)
{
struct hl_mmap_mem_buf *buf;
struct idr *idp;
const char *topic;
+ u64 mem_id;
u32 id;
+ hl_mem_mgr_fini_stats_reset(stats);
+
idp = &mmg->handles;
idr_for_each_entry(idp, buf, id) {
topic = buf->behavior->topic;
- if (hl_mmap_mem_buf_put(buf) != 1)
+ mem_id = buf->behavior->mem_id;
+ if (hl_mmap_mem_buf_put(buf) != 1) {
dev_err(mmg->dev,
"%s: Buff handle %u for CTX is still alive\n",
topic, id);
+ hl_mem_mgr_fini_stats_inc(mem_id, stats);
+ }
}
}
diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index d3eaab908457..79823facce7f 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -6,6 +6,7 @@
*/
#include <linux/slab.h>
+#include <linux/pci.h>
#include "../habanalabs.h"
@@ -262,7 +263,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flu
mmu_funcs->flush(ctx);
if (trace_habanalabs_mmu_unmap_enabled() && !rc)
- trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);
+ trace_habanalabs_mmu_unmap(&hdev->pdev->dev, virt_addr, 0, page_size, flush_pte);
return rc;
}
@@ -349,7 +350,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_s
if (flush_pte)
mmu_funcs->flush(ctx);
- trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);
+ trace_habanalabs_mmu_map(&hdev->pdev->dev, virt_addr, phys_addr, page_size, flush_pte);
return 0;
@@ -599,6 +600,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
case ASIC_GAUDI2C:
+ case ASIC_GAUDI2D:
hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
if (prop->pmmu.host_resident)
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
@@ -644,7 +646,8 @@ int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
if (rc)
dev_err_ratelimited(hdev->dev,
- "%s cache invalidation failed, rc=%d\n",
+ "%s: %s cache invalidation failed, rc=%d\n",
+ dev_name(&hdev->pdev->dev),
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);
return rc;
@@ -659,8 +662,9 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
asid, va, size);
if (rc)
dev_err_ratelimited(hdev->dev,
- "%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
- flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);
+ "%s: %s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
+ dev_name(&hdev->pdev->dev), flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU",
+ va, size, rc);
return rc;
}
diff --git a/drivers/accel/habanalabs/common/pci/pci.c b/drivers/accel/habanalabs/common/pci/pci.c
index 191e0e3cf3a5..81cbd8697d4c 100644
--- a/drivers/accel/habanalabs/common/pci/pci.c
+++ b/drivers/accel/habanalabs/common/pci/pci.c
@@ -123,7 +123,7 @@ int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
if (unlikely(trace_habanalabs_elbi_read_enabled()))
- trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val);
+ trace_habanalabs_elbi_read(&hdev->pdev->dev, (u32) addr, val);
return 0;
}
@@ -186,7 +186,7 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
if (unlikely(trace_habanalabs_elbi_write_enabled()))
- trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val);
+ trace_habanalabs_elbi_write(&hdev->pdev->dev, (u32) addr, val);
return 0;
}
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 8a9f98832157..8f55ba3b4e73 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -96,14 +96,21 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
infineon_second_stage_third_instance =
(infineon_second_stage_version >> 16) & mask;
- if (cpucp_info->infineon_second_stage_version)
+ if (cpucp_info->infineon_version && cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
le32_to_cpu(cpucp_info->infineon_version),
infineon_second_stage_first_instance,
infineon_second_stage_second_instance,
infineon_second_stage_third_instance);
- else
+ else if (cpucp_info->infineon_second_stage_version)
+ return sprintf(buf, "%#04x:%#04x:%#04x\n",
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
+ else if (cpucp_info->infineon_version)
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
+
+ return 0;
}
static DEVICE_ATTR_RO(vrm_ver);
@@ -142,8 +149,9 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "0x%08x\n",
- le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
+ return sprintf(buf, "0x%08x%08x\n",
+ le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_timestamp),
+ le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
}
static ssize_t cpucp_kernel_ver_show(struct device *dev,
@@ -270,6 +278,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI2C:
str = "GAUDI2C";
break;
+ case ASIC_GAUDI2D:
+ str = "GAUDI2D";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -364,7 +375,7 @@ out:
}
static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset,
+ const struct bin_attribute *attr, char *buf, loff_t offset,
size_t max_size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -439,7 +450,7 @@ static DEVICE_ATTR_RO(security_enabled);
static DEVICE_ATTR_RO(module_id);
static DEVICE_ATTR_RO(parent_device);
-static struct bin_attribute bin_attr_eeprom = {
+static const struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
.size = PAGE_SIZE,
.read = eeprom_read_handler
@@ -468,7 +479,7 @@ static struct attribute *hl_dev_attrs[] = {
NULL,
};
-static struct bin_attribute *hl_dev_bin_attrs[] = {
+static const struct bin_attribute *const hl_dev_bin_attrs[] = {
&bin_attr_eeprom,
NULL
};
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index f2b04ffb0ecb..34771d75da9d 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -1639,10 +1639,8 @@ static int gaudi_late_init(struct hl_device *hdev)
}
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
- if (rc) {
- dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ if (rc)
return rc;
- }
/* Scrub both SRAM and DRAM */
rc = hdev->asic_funcs->scrub_device_mem(hdev);
@@ -4154,13 +4152,7 @@ skip_reset:
static int gaudi_suspend(struct hl_device *hdev)
{
- int rc;
-
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
- if (rc)
- dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
-
- return rc;
+ return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
}
static int gaudi_resume(struct hl_device *hdev)
@@ -4176,10 +4168,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
+#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
+
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index fa1c4feb9f89..b8c0689dba64 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -728,6 +728,354 @@ static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
+const char *gaudi2_engine_id_str[] = {
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_0),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_1),
+ __stringify(GAUDI2_ENGINE_ID_ROT_0),
+ __stringify(GAUDI2_ENGINE_ID_ROT_1),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_1),
+ __stringify(GAUDI2_ENGINE_ID_PCIE),
+ __stringify(GAUDI2_ENGINE_ID_PSOC),
+ __stringify(GAUDI2_ENGINE_ID_ARC_FARM),
+ __stringify(GAUDI2_ENGINE_ID_KDMA),
+ __stringify(GAUDI2_ENGINE_ID_SIZE),
+};
+
+const char *gaudi2_queue_id_str[] = {
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_3),
+ __stringify(GAUDI2_QUEUE_ID_CPU_PQ),
+ __stringify(GAUDI2_QUEUE_ID_SIZE),
+};
+
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
@@ -2601,6 +2949,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
+ prop->supports_advanced_cpucp_rc = true;
+
return 0;
free_qprops:
@@ -3148,7 +3498,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
- /* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
@@ -3160,6 +3509,13 @@ static int gaudi2_early_init(struct hl_device *hdev)
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
+
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
}
return 0;
@@ -3308,14 +3664,10 @@ static int gaudi2_late_init(struct hl_device *hdev)
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
- hdev->asic_prop.supports_advanced_cpucp_rc = true;
-
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr);
- if (rc) {
- dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ if (rc)
return rc;
- }
rc = gaudi2_fetch_psoc_frequency(hdev);
if (rc) {
@@ -3783,7 +4135,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
prop->supports_compute_reset = true;
/* Event queue sanity check added in FW version 1.11 */
- if (hl_is_fw_sw_ver_below(hdev, 1, 11))
+ if (hl_fw_version_cmp(hdev, 1, 11, 0) < 0)
hdev->event_queue.check_eqe_index = false;
else
hdev->event_queue.check_eqe_index = true;
@@ -3798,6 +4150,8 @@ static int gaudi2_sw_init(struct hl_device *hdev)
if (rc)
goto special_blocks_free;
+ hdev->heartbeat_debug_info.cpu_queue_id = GAUDI2_QUEUE_ID_CPU_PQ;
+
return 0;
special_blocks_free:
@@ -4836,7 +5190,7 @@ static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
- if (fw_reset)
+ if (fw_reset || hdev->cpld_shutdown)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
@@ -6314,26 +6668,6 @@ static void gaudi2_execute_hard_reset(struct hl_device *hdev)
WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
}
-static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
-{
- int i, rc = 0;
- u32 reg_val;
-
- for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
- rc = hl_poll_timeout(
- hdev,
- mmCPU_RST_STATUS_TO_HOST,
- reg_val,
- reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
- 1000,
- poll_timeout_us);
-
- if (rc)
- dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
- reg_val);
- return rc;
-}
-
/**
* gaudi2_execute_soft_reset - execute soft reset by driver/FW
*
@@ -6346,23 +6680,8 @@ static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_
static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset,
u32 poll_timeout_us)
{
- int rc;
-
- if (!driver_performs_reset) {
- if (hl_is_fw_sw_ver_below(hdev, 1, 10)) {
- /* set SP to indicate reset request sent to FW */
- WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
-
- WREG32(mmGIC_HOST_SOFT_RST_IRQ_POLL_REG,
- gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
-
- /* wait for f/w response */
- rc = gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
- } else {
- rc = hl_fw_send_soft_reset(hdev);
- }
- return rc;
- }
+ if (!driver_performs_reset)
+ return hl_fw_send_soft_reset(hdev);
/* Block access to engines, QMANs and SM during reset, these
* RRs will be reconfigured after soft reset.
@@ -6502,13 +6821,7 @@ skip_reset:
static int gaudi2_suspend(struct hl_device *hdev)
{
- int rc;
-
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
- if (rc)
- dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
-
- return rc;
+ return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
}
static int gaudi2_resume(struct hl_device *hdev)
@@ -6525,6 +6838,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
@@ -6815,7 +7135,8 @@ static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parse
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ dev_err(hdev->dev, "h/w queue %s is disabled\n",
+ GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id));
return -EINVAL;
}
@@ -7067,7 +7388,8 @@ static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
- "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
+ "Failed to send msg_short packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -7093,8 +7415,8 @@ static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queu
timeout_usec);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
- hw_queue_id, tmp);
+ dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp);
rc = -EIO;
}
@@ -7914,7 +8236,7 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
bool has_block_id = false;
u16 block_id;
- if (!hl_is_fw_sw_ver_below(hdev, 1, 12))
+ if (hl_fw_version_cmp(hdev, 1, 12, 0) >= 0)
has_block_id = true;
ecc_address = le64_to_cpu(ecc_data->ecc_address);
@@ -8165,13 +8487,7 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
}
hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
-
- if (hl_is_fw_sw_ver_below(hdev, 1, 9) &&
- !hdev->asic_prop.fw_security_enabled &&
- ((module_idx == 0) || (module_idx == 1)))
- lbw_rtr_id = DCORE0_RTR0;
- else
- lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
break;
case RAZWI_MME:
sprintf(initiator_name, "MME_%u", module_idx);
@@ -9310,8 +9626,8 @@ static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type,
static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt)
{
+ bool require_hard_reset = false;
u32 addr, beat, beat_shift;
- bool rc = false;
dev_err_ratelimited(hdev->dev,
"READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n",
@@ -9343,7 +9659,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
beat,
le32_to_cpu(rd_err_data->dbg_rd_err_dm),
le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
- rc |= true;
+ require_hard_reset = true;
}
beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT;
@@ -9356,7 +9672,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
(le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >>
(HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift));
- rc |= true;
+ require_hard_reset = true;
}
dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n", beat);
@@ -9366,7 +9682,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1]));
}
- return rc;
+ return require_hard_reset;
}
static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev,
@@ -9650,8 +9966,8 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
- engine_id, intr_type, q->queue_index);
+ "ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
+ GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
@@ -9824,11 +10140,6 @@ static u16 event_id_to_engine_id(struct hl_device *hdev, u16 event_type)
return U16_MAX;
}
-static void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
-{
- hdev->eq_heartbeat_received = true;
-}
-
static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
@@ -10050,6 +10361,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
+ is_critical = eq_entry->sei_data.hdr.is_critical;
}
error_count++;
break;
@@ -10070,7 +10382,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
- if (hl_is_fw_sw_ver_equal_or_greater(hdev, 1, 13))
+ if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0)
is_critical = true;
break;
@@ -10223,7 +10535,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
@@ -10281,8 +10593,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_print_event(hdev, event_type, true,
"No error cause for H/W event %u", event_type);
- if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) ||
- reset_required) {
+ if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) || reset_required) {
if (reset_required ||
(gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD))
reset_flags |= HL_DRV_RESET_HARD;
@@ -10312,6 +10623,7 @@ reset_device:
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+ hl_debugfs_cfg_access_history_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -10348,8 +10660,8 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
- hw_queue_id);
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -10489,7 +10801,7 @@ end:
(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- kfree(lin_dma_pkts_arr);
+ kvfree(lin_dma_pkts_arr);
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index eee41387b269..bdf5c1bd2d63 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -240,6 +240,15 @@
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+extern const char *gaudi2_engine_id_str[];
+extern const char *gaudi2_queue_id_str[];
+
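+/*
+ * Map an engine/queue id to its enum name for log messages; out-of-range
+ * ids resolve to "not found".
+ */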
+#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" : \
+ gaudi2_engine_id_str[initiator])
+
+#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" : \
+ gaudi2_queue_id_str[initiator])
+
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
@@ -384,7 +393,7 @@ enum gaudi2_edma_id {
/* User interrupt count is aligned with HW CQ count.
* We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
*/
-#define GAUDI2_NUM_USER_INTERRUPTS 255
+#define GAUDI2_NUM_USER_INTERRUPTS 64
#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)
@@ -416,11 +425,11 @@ enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
GAUDI2_IRQ_NUM_TPC_ASSERT,
GAUDI2_IRQ_NUM_EQ_ERROR,
+ GAUDI2_IRQ_NUM_USER_FIRST,
+ GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_RESERVED_FIRST,
- GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
+ GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_RESERVED_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
- GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
- GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 2423620ff358..bc3c57bda5cd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2426,7 +2426,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41);
}
return 0;
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
index 34bf80c5a44b..307ccb912ccd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
mmDCORE0_EDMA0_CORE_CTX_IDX,
mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
+ mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
mmDCORE0_EDMA0_QM_CQ_CFG0_0,
mmDCORE0_EDMA0_QM_CQ_CFG0_1,
diff --git a/drivers/accel/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c
index 5a359c3bdc78..84768e306269 100644
--- a/drivers/accel/habanalabs/goya/goya.c
+++ b/drivers/accel/habanalabs/goya/goya.c
@@ -893,11 +893,8 @@ int goya_late_init(struct hl_device *hdev)
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
- if (rc) {
- dev_err(hdev->dev,
- "Failed to enable PCI access from CPU %d\n", rc);
+ if (rc)
return rc;
- }
/* force setting to low frequency */
goya->curr_pll_profile = PLL_LOW;
@@ -2864,13 +2861,7 @@ static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
int goya_suspend(struct hl_device *hdev)
{
- int rc;
-
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
- if (rc)
- dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
-
- return rc;
+ return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
}
int goya_resume(struct hl_device *hdev)
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
index 0231d6c55b4a..753d46a2836b 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
@@ -63,9 +63,9 @@
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull
-#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 256
+#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 127
-#define GAUDI2_MSIX_ENTRIES 512
+#define GAUDI2_MSIX_ENTRIES 128
#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
index b2dbe1f64430..82d639990cca 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
@@ -330,9 +330,9 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 149, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "EDMA7_ECC_SERR" },
{ .fc_id = 150, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
- .name = "HDMA4_ECC_SERR" },
+ .name = "EDMA4_ECC_SERR" },
{ .fc_id = 151, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
- .name = "HDMA5_ECC_SERR" },
+ .name = "EDMA5_ECC_SERR" },
{ .fc_id = 152, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "EDMA2_ECC_DERR" },
{ .fc_id = 153, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
@@ -856,55 +856,55 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 412, .cpu_id = 84, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PCIE_ADDR_DEC_ERR" },
{ .fc_id = 413, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC0_AXI_ERR_RSP" },
+ .name = "DCORE0_TPC0_AXI_ERR_RSP" },
{ .fc_id = 414, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC1_AXI_ERR_RSP" },
+ .name = "DCORE0_TPC1_AXI_ERR_RSP" },
{ .fc_id = 415, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC2_AXI_ERR_RSP" },
+ .name = "DCORE0_TPC2_AXI_ERR_RSP" },
{ .fc_id = 416, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC3_AXI_ERR_RSP" },
+ .name = "DCORE0_TPC3_AXI_ERR_RSP" },
{ .fc_id = 417, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC4_AXI_ERR_RSP" },
+ .name = "DCORE0_TPC4_AXI_ERR_RSP" },
{ .fc_id = 418, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC5_AXI_ERR_RSP" },
+ .name = "DCORE0_TPC5_AXI_ERR_RSP" },
{ .fc_id = 419, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC6_AXI_ERR_RSP" },
+ .name = "DCORE1_TPC0_AXI_ERR_RSP" },
{ .fc_id = 420, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC7_AXI_ERR_RSP" },
+ .name = "DCORE1_TPC1_AXI_ERR_RSP" },
{ .fc_id = 421, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC8_AXI_ERR_RSP" },
+ .name = "DCORE1_TPC2_AXI_ERR_RSP" },
{ .fc_id = 422, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC9_AXI_ERR_RSP" },
+ .name = "DCORE1_TPC3_AXI_ERR_RSP" },
{ .fc_id = 423, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC10_AXI_ERR_RSP" },
+ .name = "DCORE1_TPC4_AXI_ERR_RSP" },
{ .fc_id = 424, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC11_AXI_ERR_RSP" },
+ .name = "DCORE1_TPC5_AXI_ERR_RSP" },
{ .fc_id = 425, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC12_AXI_ERR_RSP" },
+ .name = "DCORE2_TPC0_AXI_ERR_RSP" },
{ .fc_id = 426, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC13_AXI_ERR_RSP" },
+ .name = "DCORE2_TPC1_AXI_ERR_RSP" },
{ .fc_id = 427, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC14_AXI_ERR_RSP" },
+ .name = "DCORE2_TPC2_AXI_ERR_RSP" },
{ .fc_id = 428, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC15_AXI_ERR_RSP" },
+ .name = "DCORE2_TPC3_AXI_ERR_RSP" },
{ .fc_id = 429, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC16_AXI_ERR_RSP" },
+ .name = "DCORE2_TPC4_AXI_ERR_RSP" },
{ .fc_id = 430, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC17_AXI_ERR_RSP" },
+ .name = "DCORE2_TPC5_AXI_ERR_RSP" },
{ .fc_id = 431, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC18_AXI_ERR_RSP" },
+ .name = "DCORE3_TPC0_AXI_ERR_RSP" },
{ .fc_id = 432, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC19_AXI_ERR_RSP" },
+ .name = "DCORE3_TPC1_AXI_ERR_RSP" },
{ .fc_id = 433, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC20_AXI_ERR_RSP" },
+ .name = "DCORE3_TPC2_AXI_ERR_RSP" },
{ .fc_id = 434, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC21_AXI_ERR_RSP" },
+ .name = "DCORE3_TPC3_AXI_ERR_RSP" },
{ .fc_id = 435, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC22_AXI_ERR_RSP" },
+ .name = "DCORE3_TPC4_AXI_ERR_RSP" },
{ .fc_id = 436, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC23_AXI_ERR_RSP" },
+ .name = "DCORE3_TPC5_AXI_ERR_RSP" },
{ .fc_id = 437, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC24_AXI_ERR_RSP" },
+ .name = "DCORE4_TPC0_AXI_ERR_RSP" },
{ .fc_id = 438, .cpu_id = 86, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "AXI_ECC" },
{ .fc_id = 439, .cpu_id = 87, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
@@ -965,73 +965,73 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
{ .fc_id = 467, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "MME3_QMAN_SW_ERROR" },
- { .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PSOC_MME_PLL_LOCK_ERR" },
- { .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PSOC_CPU_PLL_LOCK_ERR" },
- { .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_TPC_PLL_LOCK_ERR" },
- { .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_NIC_PLL_LOCK_ERR" },
- { .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
- { .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
- { .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_TPC_PLL_LOCK_ERR" },
- { .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_NIC_PLL_LOCK_ERR" },
- { .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PMMU_MME_PLL_LOCK_ERR" },
- { .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_TPC_PLL_LOCK_ERR" },
- { .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_PCI_PLL_LOCK_ERR" },
- { .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
- { .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
- { .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_TPC_PLL_LOCK_ERR" },
- { .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PSOC_VID_PLL_LOCK_ERR" },
- { .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PMMU_VID_PLL_LOCK_ERR" },
- { .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_HBM_PLL_LOCK_ERR" },
- { .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
- { .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_HBM_PLL_LOCK_ERR" },
- { .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_HBM_PLL_LOCK_ERR" },
- { .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
- { .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ { .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_HBM_PLL_LOCK_ERR" },
{ .fc_id = 502, .cpu_id = 93, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "CPU_AXI_ERR_RSP" },
@@ -1298,103 +1298,103 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 633, .cpu_id = 130, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC0_BMON_SPMU" },
{ .fc_id = 634, .cpu_id = 131, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC0_KERNEL_ERR" },
+ .name = "DCORE0_TPC0_KERNEL_ERR" },
{ .fc_id = 635, .cpu_id = 132, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC1_BMON_SPMU" },
{ .fc_id = 636, .cpu_id = 133, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC1_KERNEL_ERR" },
+ .name = "DCORE0_TPC1_KERNEL_ERR" },
{ .fc_id = 637, .cpu_id = 134, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC2_BMON_SPMU" },
{ .fc_id = 638, .cpu_id = 135, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC2_KERNEL_ERR" },
+ .name = "DCORE0_TPC2_KERNEL_ERR" },
{ .fc_id = 639, .cpu_id = 136, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC3_BMON_SPMU" },
{ .fc_id = 640, .cpu_id = 137, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC3_KERNEL_ERR" },
+ .name = "DCORE0_TPC3_KERNEL_ERR" },
{ .fc_id = 641, .cpu_id = 138, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC4_BMON_SPMU" },
{ .fc_id = 642, .cpu_id = 139, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC4_KERNEL_ERR" },
+ .name = "DCORE0_TPC4_KERNEL_ERR" },
{ .fc_id = 643, .cpu_id = 140, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC5_BMON_SPMU" },
{ .fc_id = 644, .cpu_id = 141, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC5_KERNEL_ERR" },
+ .name = "DCORE0_TPC5_KERNEL_ERR" },
{ .fc_id = 645, .cpu_id = 150, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC6_BMON_SPMU" },
{ .fc_id = 646, .cpu_id = 151, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC6_KERNEL_ERR" },
+ .name = "DCORE1_TPC0_KERNEL_ERR" },
{ .fc_id = 647, .cpu_id = 152, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC7_BMON_SPMU" },
{ .fc_id = 648, .cpu_id = 153, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC7_KERNEL_ERR" },
+ .name = "DCORE1_TPC1_KERNEL_ERR" },
{ .fc_id = 649, .cpu_id = 146, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC8_BMON_SPMU" },
{ .fc_id = 650, .cpu_id = 147, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC8_KERNEL_ERR" },
+ .name = "DCORE1_TPC2_KERNEL_ERR" },
{ .fc_id = 651, .cpu_id = 148, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC9_BMON_SPMU" },
{ .fc_id = 652, .cpu_id = 149, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC9_KERNEL_ERR" },
+ .name = "DCORE1_TPC3_KERNEL_ERR" },
{ .fc_id = 653, .cpu_id = 142, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC10_BMON_SPMU" },
{ .fc_id = 654, .cpu_id = 143, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC10_KERNEL_ERR" },
+ .name = "DCORE1_TPC4_KERNEL_ERR" },
{ .fc_id = 655, .cpu_id = 144, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC11_BMON_SPMU" },
{ .fc_id = 656, .cpu_id = 145, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC11_KERNEL_ERR" },
+ .name = "DCORE1_TPC5_KERNEL_ERR" },
{ .fc_id = 657, .cpu_id = 162, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC12_BMON_SPMU" },
{ .fc_id = 658, .cpu_id = 163, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC12_KERNEL_ERR" },
+ .name = "DCORE2_TPC0_KERNEL_ERR" },
{ .fc_id = 659, .cpu_id = 164, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC13_BMON_SPMU" },
{ .fc_id = 660, .cpu_id = 165, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC13_KERNEL_ERR" },
+ .name = "DCORE2_TPC1_KERNEL_ERR" },
{ .fc_id = 661, .cpu_id = 158, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC14_BMON_SPMU" },
{ .fc_id = 662, .cpu_id = 159, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC14_KERNEL_ERR" },
+ .name = "DCORE2_TPC2_KERNEL_ERR" },
{ .fc_id = 663, .cpu_id = 160, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC15_BMON_SPMU" },
{ .fc_id = 664, .cpu_id = 161, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC15_KERNEL_ERR" },
+ .name = "DCORE2_TPC3_KERNEL_ERR" },
{ .fc_id = 665, .cpu_id = 154, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC16_BMON_SPMU" },
{ .fc_id = 666, .cpu_id = 155, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC16_KERNEL_ERR" },
+ .name = "DCORE2_TPC4_KERNEL_ERR" },
{ .fc_id = 667, .cpu_id = 156, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC17_BMON_SPMU" },
{ .fc_id = 668, .cpu_id = 157, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC17_KERNEL_ERR" },
+ .name = "DCORE2_TPC5_KERNEL_ERR" },
{ .fc_id = 669, .cpu_id = 166, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC18_BMON_SPMU" },
{ .fc_id = 670, .cpu_id = 167, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC18_KERNEL_ERR" },
+ .name = "DCORE3_TPC0_KERNEL_ERR" },
{ .fc_id = 671, .cpu_id = 168, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC19_BMON_SPMU" },
{ .fc_id = 672, .cpu_id = 169, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC19_KERNEL_ERR" },
+ .name = "DCORE3_TPC1_KERNEL_ERR" },
{ .fc_id = 673, .cpu_id = 170, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC20_BMON_SPMU" },
{ .fc_id = 674, .cpu_id = 171, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC20_KERNEL_ERR" },
+ .name = "DCORE3_TPC2_KERNEL_ERR" },
{ .fc_id = 675, .cpu_id = 172, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC21_BMON_SPMU" },
{ .fc_id = 676, .cpu_id = 173, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC21_KERNEL_ERR" },
+ .name = "DCORE3_TPC3_KERNEL_ERR" },
{ .fc_id = 677, .cpu_id = 174, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC22_BMON_SPMU" },
{ .fc_id = 678, .cpu_id = 175, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC22_KERNEL_ERR" },
+ .name = "DCORE3_TPC4_KERNEL_ERR" },
{ .fc_id = 679, .cpu_id = 176, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC23_BMON_SPMU" },
{ .fc_id = 680, .cpu_id = 177, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC23_KERNEL_ERR" },
+ .name = "DCORE3_TPC5_KERNEL_ERR" },
{ .fc_id = 681, .cpu_id = 178, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC24_BMON_SPMU" },
{ .fc_id = 682, .cpu_id = 179, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC24_KERNEL_ERR" },
+ .name = "DCORE4_TPC0_KERNEL_ERR" },
{ .fc_id = 683, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "" },
{ .fc_id = 684, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
@@ -1827,8 +1827,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.name = "DEC0_BMON_SPMU" },
{ .fc_id = 898, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "DEC1_SPI" },
- { .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "DEC1_SPI" },
+ { .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC1_BMON_SPMU" },
{ .fc_id = 900, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "DEC2_SPI" },
{ .fc_id = 901, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
@@ -2377,8 +2377,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.name = "" },
{ .fc_id = 1173, .cpu_id = 479, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "" },
- { .fc_id = 1174, .cpu_id = 480, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
- .name = "" },
+ { .fc_id = 1174, .cpu_id = 480, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_DMA_QM" },
{ .fc_id = 1175, .cpu_id = 481, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "" },
{ .fc_id = 1176, .cpu_id = 482, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
@@ -2442,55 +2442,55 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 1205, .cpu_id = 511, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "" },
{ .fc_id = 1206, .cpu_id = 512, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC0_QM" },
+ .name = "DCORE0_TPC0_QM" },
{ .fc_id = 1207, .cpu_id = 513, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC1_QM" },
+ .name = "DCORE0_TPC1_QM" },
{ .fc_id = 1208, .cpu_id = 514, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC2_QM" },
+ .name = "DCORE0_TPC2_QM" },
{ .fc_id = 1209, .cpu_id = 515, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC3_QM" },
+ .name = "DCORE0_TPC3_QM" },
{ .fc_id = 1210, .cpu_id = 516, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC4_QM" },
+ .name = "DCORE0_TPC4_QM" },
{ .fc_id = 1211, .cpu_id = 517, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC5_QM" },
+ .name = "DCORE0_TPC5_QM" },
{ .fc_id = 1212, .cpu_id = 518, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC6_QM" },
+ .name = "DCORE1_TPC0_QM" },
{ .fc_id = 1213, .cpu_id = 519, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC7_QM" },
+ .name = "DCORE1_TPC1_QM" },
{ .fc_id = 1214, .cpu_id = 520, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC8_QM" },
+ .name = "DCORE1_TPC2_QM" },
{ .fc_id = 1215, .cpu_id = 521, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC9_QM" },
+ .name = "DCORE1_TPC3_QM" },
{ .fc_id = 1216, .cpu_id = 522, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC10_QM" },
+ .name = "DCORE1_TPC4_QM" },
{ .fc_id = 1217, .cpu_id = 523, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC11_QM" },
+ .name = "DCORE1_TPC5_QM" },
{ .fc_id = 1218, .cpu_id = 524, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC12_QM" },
+ .name = "DCORE2_TPC0_QM" },
{ .fc_id = 1219, .cpu_id = 525, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC13_QM" },
+ .name = "DCORE2_TPC1_QM" },
{ .fc_id = 1220, .cpu_id = 526, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC14_QM" },
+ .name = "DCORE2_TPC2_QM" },
{ .fc_id = 1221, .cpu_id = 527, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC15_QM" },
+ .name = "DCORE2_TPC3_QM" },
{ .fc_id = 1222, .cpu_id = 528, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC16_QM" },
+ .name = "DCORE2_TPC4_QM" },
{ .fc_id = 1223, .cpu_id = 529, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC17_QM" },
+ .name = "DCORE2_TPC5_QM" },
{ .fc_id = 1224, .cpu_id = 530, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC18_QM" },
+ .name = "DCORE3_TPC0_QM" },
{ .fc_id = 1225, .cpu_id = 531, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC19_QM" },
+ .name = "DCORE3_TPC1_QM" },
{ .fc_id = 1226, .cpu_id = 532, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC20_QM" },
+ .name = "DCORE3_TPC2_QM" },
{ .fc_id = 1227, .cpu_id = 533, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC21_QM" },
+ .name = "DCORE3_TPC3_QM" },
{ .fc_id = 1228, .cpu_id = 534, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC22_QM" },
+ .name = "DCORE3_TPC4_QM" },
{ .fc_id = 1229, .cpu_id = 535, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC23_QM" },
+ .name = "DCORE3_TPC5_QM" },
{ .fc_id = 1230, .cpu_id = 536, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
- .name = "TPC24_QM" },
+ .name = "DCORE4_TPC0_QM" },
{ .fc_id = 1231, .cpu_id = 537, .valid = 0, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
.name = "" },
{ .fc_id = 1232, .cpu_id = 538, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
@@ -2674,19 +2674,19 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 1321, .cpu_id = 627, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
.name = "DEV_RESET_REQ" },
{ .fc_id = 1322, .cpu_id = 628, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_PWR_BRK_ENTRY" },
+ .name = "PWR_BRK_ENTRY" },
{ .fc_id = 1323, .cpu_id = 629, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_PWR_BRK_EXT" },
+ .name = "PWR_BRK_EXT" },
{ .fc_id = 1324, .cpu_id = 630, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_PWR_RD_MODE0" },
+ .name = "PWR_RD_MODE0" },
{ .fc_id = 1325, .cpu_id = 631, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_PWR_RD_MODE1" },
+ .name = "PWR_RD_MODE1" },
{ .fc_id = 1326, .cpu_id = 632, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_PWR_RD_MODE2" },
+ .name = "PWR_RD_MODE2" },
{ .fc_id = 1327, .cpu_id = 633, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_PWR_RD_MODE3" },
+ .name = "PWR_RD_MODE3" },
{ .fc_id = 1328, .cpu_id = 634, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
- .name = "ARC_EQ_HEARTBEAT" },
+ .name = "EQ_HEARTBEAT" },
};
#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
index 18ca147b1c86..6ea936c9594e 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
@@ -45,6 +45,13 @@
#define GAUDI2_ARM_RX_MB_OFFSET (GAUDI2_ARM_RX_MB_ADDR - \
GAUDI2_SP_SRAM_BASE_ADDR)
+#define POWER_MODE_LEVELS { \
+ 150000, /* 00 */ \
+ 250000, /* 01 */ \
+ 400000, /* 10 */ \
+ /* 11: Normal mode */ \
+}
+
enum gaudi2_fw_status {
GAUDI2_PID_STATUS_UP = 0x1, /* PID on ARC0 is up */
GAUDI2_ARM_STATUS_UP = 0x2, /* ARM Linux Boot complete */
@@ -52,26 +59,6 @@ enum gaudi2_fw_status {
GAUDI2_STATUS_LAST = 0xFF
};
-struct gaudi2_cold_rst_data {
- union {
- struct {
- u32 recovery_flag: 1;
- u32 validation_flag: 1;
- u32 efuse_read_flag: 1;
- u32 spsram_init_done : 1;
- u32 fake_security_enable : 1;
- u32 fake_sig_validation_en : 1;
- u32 bist_skip_enable : 1;
- u32 reserved1 : 1;
- u32 fake_bis_compliant : 1;
- u32 wd_rst_cause_arm : 1;
- u32 wd_rst_cause_arcpid : 1;
- u32 reserved : 21;
- };
- __le32 data;
- };
-};
-
enum gaudi2_rst_src {
HL_COLD_RST = 1,
HL_MANUAL_RST = 2,
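The POWER_MODE_LEVELS macro added above is shaped as an array initializer: the 2-bit power-read mode indexes one of three reduced power levels, while mode 0b11 (normal mode) deliberately has no entry. A minimal sketch of how firmware-interface code might consume such a table; the helper name and the reading of the values as milliwatt limits are assumptions, not taken from the driver:

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>

/* Sketch only: assumes POWER_MODE_LEVELS from gaudi2_fw_if.h */
static const u32 gaudi2_power_mode_levels[] = POWER_MODE_LEVELS;

/* Returns false for mode 0b11 (normal mode), which has no table entry */
static bool gaudi2_power_mode_level(u32 mode, u32 *level)
{
	if (mode >= ARRAY_SIZE(gaudi2_power_mode_levels))
		return false;

	*level = gaudi2_power_mode_levels[mode];
	return true;
}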
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
index f3eaeb6d9b7e..1e9c056e437d 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
@@ -58,4 +58,12 @@
#define mmWD_GPIO_DATAOUT_REG mmPSOC_GPIO3_DATAOUT
#define mmSTM_PROFILER_SPE_REG mmPSOC_STM_STMSPER
+/* Registers below are used to pass the boot_if data between ARM and ARC1 */
+#define mmARM_MSG_BOOT_ERR_SET mmCPU_IF_SPECIAL_GLBL_SPARE_0
+#define mmARM_MSG_BOOT_ERR_CLR mmCPU_IF_SPECIAL_GLBL_SPARE_1
+#define mmARM_MSG_BOOT_DEV_STS_SET mmCPU_IF_SPECIAL_GLBL_SPARE_2
+#define mmARM_MSG_BOOT_DEV_STS_CLR mmCPU_IF_SPECIAL_GLBL_SPARE_3
+#define mmMGMT_MSG_BOOT_ERR mmCPU_MSTR_IF_SPECIAL_GLBL_SPARE_0
+#define mmMGMT_MSG_BOOT_DEV_STS mmCPU_MSTR_IF_SPECIAL_GLBL_SPARE_1
+
#endif /* GAUDI2_REG_MAP_H_ */
diff --git a/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
index 4f951cada077..a75faa00197f 100644
--- a/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
@@ -25,7 +25,8 @@ enum hl_revision_id {
REV_ID_INVALID = 0x00,
REV_ID_A = 0x01,
REV_ID_B = 0x02,
- REV_ID_C = 0x03
+ REV_ID_C = 0x03,
+ REV_ID_D = 0x04
};
#endif /* INCLUDE_PCI_GENERAL_H_ */
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 682c53245286..9e055b5ce03d 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -8,6 +8,7 @@ config DRM_ACCEL_IVPU
select FW_LOADER
select DRM_GEM_SHMEM_HELPER
select GENERIC_ALLOCATOR
+ select WANT_DEV_COREDUMP
help
Choose this option if you have a system with a 14th generation
Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
@@ -15,3 +16,12 @@ config DRM_ACCEL_IVPU
and Deep Learning applications.
If "M" is selected, the module will be called intel_vpu.
+
+config DRM_ACCEL_IVPU_DEBUG
+ bool "Intel NPU debug mode"
+ depends on DRM_ACCEL_IVPU
+ help
+ Choose this option to enable additional
+ debug features for the Intel NPU driver:
+ - Always print debug messages regardless of dyndbg config,
+ - Enable unsafe module params.
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index 95ff7ad16338..dbf76b8a5b4c 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -1,20 +1,30 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2023 Intel Corporation
+# Copyright (C) 2023-2024 Intel Corporation
intel_vpu-y := \
ivpu_drv.o \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
- ivpu_hw_37xx.o \
- ivpu_hw_40xx.o \
+ ivpu_gem_userptr.o \
+ ivpu_hw.o \
+ ivpu_hw_btrs.o \
+ ivpu_hw_ip.o \
ivpu_ipc.o \
ivpu_job.o \
ivpu_jsm_msg.o \
ivpu_mmu.o \
ivpu_mmu_context.o \
- ivpu_pm.o
+ ivpu_ms.o \
+ ivpu_pm.o \
+ ivpu_sysfs.o \
+ ivpu_trace_points.o
intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o
+intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o
obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
+
+subdir-ccflags-$(CONFIG_DRM_ACCEL_IVPU_DEBUG) += -DDEBUG
+
+CFLAGS_ivpu_trace_points.o = -I$(src)
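The new subdir-ccflags-$(CONFIG_DRM_ACCEL_IVPU_DEBUG) += -DDEBUG line is what implements the Kconfig promise to "Always print debug messages regardless of dyndbg config": when DEBUG is defined at compile time, dev_dbg()/pr_debug() call sites are enabled by default rather than requiring dynamic-debug activation. A generic illustration of the same effect at file scope (not code from this driver):

/* Defining DEBUG before the includes enables dev_dbg() by default,
 * whether or not CONFIG_DYNAMIC_DEBUG is set. -DDEBUG via ccflags
 * does the same for every file in the directory. */
#define DEBUG
#include <linux/device.h>

static void example(struct device *dev)
{
	dev_dbg(dev, "emitted without any dyndbg configuration\n");
}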
diff --git a/drivers/accel/ivpu/ivpu_coredump.c b/drivers/accel/ivpu/ivpu_coredump.c
new file mode 100644
index 000000000000..16ad0c30818c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+
+#include "ivpu_coredump.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "vpu_boot_api.h"
+
+#define CRASH_DUMP_HEADER "Intel NPU crash dump"
+#define CRASH_DUMP_HEADERS_SIZE SZ_4K
+
+void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+ struct drm_print_iterator pi = {};
+ struct drm_printer p;
+ size_t coredump_size;
+ char *coredump;
+
+ coredump_size = CRASH_DUMP_HEADERS_SIZE + FW_VERSION_HEADER_SIZE +
+ ivpu_bo_size(vdev->fw->mem_log_crit) + ivpu_bo_size(vdev->fw->mem_log_verb);
+ coredump = vmalloc(coredump_size);
+ if (!coredump)
+ return;
+
+ pi.data = coredump;
+ pi.remain = coredump_size;
+ p = drm_coredump_printer(&pi);
+
+ drm_printf(&p, "%s\n", CRASH_DUMP_HEADER);
+ drm_printf(&p, "FW version: %s\n", vdev->fw->version);
+ ivpu_fw_log_print(vdev, false, &p);
+
+ dev_coredumpv(vdev->drm.dev, coredump, pi.offset, GFP_KERNEL);
+}
diff --git a/drivers/accel/ivpu/ivpu_coredump.h b/drivers/accel/ivpu/ivpu_coredump.h
new file mode 100644
index 000000000000..8efb09d02441
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __IVPU_COREDUMP_H__
+#define __IVPU_COREDUMP_H__
+
+#include <drm/drm_print.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_fw_log.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+void ivpu_dev_coredump(struct ivpu_device *vdev);
+#else
+static inline void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+ struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+ ivpu_fw_log_print(vdev, false, &p);
+}
+#endif
+
+#endif /* __IVPU_COREDUMP_H__ */
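One detail of the new coredump path worth noting: dev_coredumpv() takes ownership of the vmalloc()ed buffer and frees it with vfree() after the dump is read (or times out), which is why ivpu_dev_coredump() never frees the buffer on the success path; the CONFIG_DEV_COREDUMP=n stub in the header instead routes the firmware log to a drm_info_printer. A sketch of that ownership contract in isolation; fill_dump() is a hypothetical helper:

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/vmalloc.h>

static void sketch_coredump(struct device *dev, size_t len)
{
	char *buf = vmalloc(len);

	if (!buf)
		return;

	fill_dump(buf, len);	/* hypothetical: write dump contents */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
	/* no vfree(buf): on success the devcoredump core frees it */
}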
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index e07e447d08d1..3bd85ee6c26b 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -45,6 +46,14 @@ static int fw_name_show(struct seq_file *s, void *v)
return 0;
}
+static int fw_version_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%s\n", vdev->fw->version);
+ return 0;
+}
+
static int fw_trace_capability_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
@@ -108,41 +117,66 @@ static int reset_pending_show(struct seq_file *s, void *v)
return 0;
}
+static int firewall_irq_counter_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
+ return 0;
+}
+
static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
+ {"fw_version", fw_version_show, 0},
{"fw_trace_capability", fw_trace_capability_show, 0},
{"fw_trace_config", fw_trace_config_show, 0},
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
+ {"firewall_irq_counter", firewall_irq_counter_show, 0},
};
+static int dvfs_mode_get(void *data, u64 *dvfs_mode)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
+
+ *dvfs_mode = vdev->fw->dvfs_mode;
+ return 0;
+}
+
+static int dvfs_mode_set(void *data, u64 dvfs_mode)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
+
+ vdev->fw->dvfs_mode = (u32)dvfs_mode;
+ return pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(dvfs_mode_fops, dvfs_mode_get, dvfs_mode_set, "%llu\n");
+
static ssize_t
-dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
- struct ivpu_fw_info *fw = vdev->fw;
- u32 dvfs_mode;
+ char buffer[VPU_DYNDBG_CMD_MAX_LEN] = {};
int ret;
- ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
- if (ret < 0)
- return ret;
-
- fw->dvfs_mode = dvfs_mode;
+ if (size >= VPU_DYNDBG_CMD_MAX_LEN)
+ return -EINVAL;
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret)
+ ret = strncpy_from_user(buffer, user_buf, size);
+ if (ret < 0)
return ret;
+ ivpu_jsm_dyndbg_control(vdev, buffer, size);
return size;
}
-static const struct file_operations dvfs_mode_fops = {
+static const struct file_operations fw_dyndbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .write = dvfs_mode_fops_write,
+ .write = fw_dyndbg_fops_write,
};
static int fw_log_show(struct seq_file *s, void *v)
@@ -168,7 +202,7 @@ fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
if (!size)
return -EINVAL;
- ivpu_fw_log_clear(vdev);
+ ivpu_fw_log_mark_read(vdev);
return size;
}
@@ -298,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
ivpu_pm_trigger_recovery(vdev, "debugfs");
@@ -313,26 +347,127 @@ static const struct file_operations ivpu_force_recovery_fops = {
.write = ivpu_force_recovery_fn,
};
+static int ivpu_reset_engine_fn(void *data, u64 val)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
+
+ return ivpu_jsm_reset_engine(vdev, (u32)val);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");
+
+static int ivpu_resume_engine_fn(void *data, u64 val)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
+
+ return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");
+
+static int dct_active_get(void *data, u64 *active_percent)
+{
+ struct ivpu_device *vdev = data;
+
+ *active_percent = vdev->pm->dct_active_percent;
+
+ return 0;
+}
+
+static int dct_active_set(void *data, u64 active_percent)
+{
+ struct ivpu_device *vdev = data;
+ int ret;
+
+ if (active_percent > 100)
+ return -EINVAL;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ if (active_percent)
+ ret = ivpu_pm_dct_enable(vdev, active_percent);
+ else
+ ret = ivpu_pm_dct_disable(vdev);
+
+ ivpu_rpm_put(vdev);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+
+static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw,
+ int band, const char *name)
+{
+ seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ name,
+ hw->hws.grace_period[band],
+ hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+}
+
+static int priority_bands_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = s->private;
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime");
+
+ return 0;
+}
+
+static int priority_bands_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, priority_bands_show, inode->i_private);
+}
+
static ssize_t
-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
- struct ivpu_device *vdev = file->private_data;
+ struct seq_file *s = file->private_data;
+ struct ivpu_device *vdev = s->private;
+ char buf[64];
+ u32 grace_period;
+ u32 process_grace_period;
+ u32 process_quantum;
+ u32 band;
+ int ret;
- if (!size)
+ if (size >= sizeof(buf))
return -EINVAL;
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
+ &process_quantum);
+ if (ret != 4)
+ return -EINVAL;
+
+ if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
+ return -EINVAL;
+
+ vdev->hw->hws.grace_period[band] = grace_period;
+ vdev->hw->hws.process_grace_period[band] = process_grace_period;
+ vdev->hw->hws.process_quantum[band] = process_quantum;
return size;
}
-static const struct file_operations ivpu_reset_engine_fops = {
+static const struct file_operations ivpu_hws_priority_bands_fops = {
.owner = THIS_MODULE,
- .open = simple_open,
- .write = ivpu_reset_engine_fn,
+ .open = priority_bands_fops_open,
+ .write = priority_bands_fops_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
void ivpu_debugfs_init(struct ivpu_device *vdev)
@@ -344,9 +479,11 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
&ivpu_force_recovery_fops);
- debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
+ debugfs_create_file("dvfs_mode", 0644, debugfs_root, vdev,
&dvfs_mode_fops);
+ debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
+ &fw_dyndbg_fops);
debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
&fw_log_fops);
debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
@@ -355,11 +492,21 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
&fw_trace_level_fops);
+ debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
+ &ivpu_hws_priority_bands_fops);
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
+ debugfs_create_file("resume_engine", 0200, debugfs_root, vdev,
+ &ivpu_resume_engine_fops);
- if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX)
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) {
debugfs_create_file("fw_profiling_freq_drive", 0200,
debugfs_root, vdev, &fw_profiling_freq_fops);
+ debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
+ }
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_hw", debugfs_root, &ivpu_hw_failure);
+#endif
}
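The dvfs_mode and dct files above are converted to the standard DEFINE_DEBUGFS_ATTRIBUTE pattern: a get/set pair operating on u64 replaces a hand-rolled file_operations, and the void *data pointer passed to debugfs_create_file() arrives back in the callbacks. A minimal generic instance of the pattern; all names are illustrative:

#include <linux/debugfs.h>

struct demo {
	u32 level;
};

static int demo_level_get(void *data, u64 *val)
{
	*val = ((struct demo *)data)->level;
	return 0;
}

static int demo_level_set(void *data, u64 val)
{
	if (val > 100)
		return -EINVAL;

	((struct demo *)data)->level = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_level_fops, demo_level_get, demo_level_set, "%llu\n");

A matching debugfs_create_file("level", 0644, root, &demo, &demo_level_fops) then exposes the value for read and write, just as the driver does for dvfs_mode.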
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 51d3f1a55d02..3d6fccdefdd6 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <generated/utsrelease.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
@@ -14,7 +16,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
@@ -26,22 +28,24 @@
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+#include "ivpu_ms.h"
#include "ivpu_pm.h"
+#include "ivpu_sysfs.h"
+#include "vpu_boot_api.h"
#ifndef DRIVER_VERSION_STR
-#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
- __stringify(DRM_IVPU_DRIVER_MINOR) "."
+#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
+#endif
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@@ -51,10 +55,18 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
+int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
+module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 37XX - 50XX), 1 - Use HW scheduler");
+
bool ivpu_disable_mmu_cont_pages;
-module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
+module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
+bool ivpu_force_snoop;
+module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
+MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
+
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -74,9 +86,8 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *fi
ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
ivpu_cmdq_release_all_locked(file_priv);
- ivpu_jsm_context_release(vdev, file_priv->ctx.id);
ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ ivpu_mmu_context_fini(vdev, &file_priv->ctx);
file_priv->bound = false;
drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
}
@@ -94,9 +105,12 @@ static void file_priv_release(struct kref *ref)
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&vdev->context_list_lock);
file_priv_unbind(vdev, file_priv);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
+ xa_destroy(&file_priv->cmdq_xa);
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
+ mutex_destroy(&file_priv->ms_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
}
@@ -106,8 +120,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
struct ivpu_file_priv *file_priv = *link;
struct ivpu_device *vdev = file_priv->vdev;
- drm_WARN_ON(&vdev->drm, !file_priv);
-
ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
file_priv->ctx.id, kref_read(&file_priv->ref));
@@ -115,20 +127,20 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
- switch (args->index) {
+ switch (capability) {
case DRM_IVPU_CAP_METRIC_STREAMER:
- args->value = 0;
- break;
+ return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
- args->value = 1;
- break;
+ return true;
+ case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR:
+ return true;
+ case DRM_IVPU_CAP_MANAGE_CMDQ:
+ return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
- return -EINVAL;
+ return false;
}
-
- return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -154,7 +166,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
+ args->value = ivpu_hw_dpu_max_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -188,7 +200,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
- ret = ivpu_get_capabilities(vdev, args);
+ args->value = ivpu_is_capable(vdev, args->index);
+ break;
+ case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ args->value = ivpu_fw_preempt_buf_size(vdev);
break;
default:
ret = -EINVAL;
@@ -228,10 +243,13 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
goto err_dev_exit;
}
+ INIT_LIST_HEAD(&file_priv->ms_instance_list);
+
file_priv->vdev = vdev;
file_priv->bound = true;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
+ mutex_init(&file_priv->ms_lock);
mutex_lock(&vdev->context_list_lock);
@@ -242,9 +260,14 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
goto err_unlock;
}
- ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
- if (ret)
- goto err_xa_erase;
+ ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
+
+ file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+
+ xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
+ file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
+ file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
mutex_unlock(&vdev->context_list_lock);
drm_dev_exit(idx);
@@ -256,10 +279,9 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
return 0;
-err_xa_erase:
- xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
mutex_unlock(&vdev->context_list_lock);
+ mutex_destroy(&file_priv->ms_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
err_dev_exit:
@@ -275,6 +297,7 @@ static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
file_priv->ctx.id, current->comm, task_pid_nr(current));
+ ivpu_ms_cleanup(file_priv);
ivpu_file_priv_put(&file_priv);
}
@@ -285,6 +308,14 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -301,7 +332,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
while (1) {
- ivpu_ipc_irq_handler(vdev, NULL);
+ ivpu_ipc_irq_handler(vdev);
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
break;
@@ -323,6 +354,21 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
return ret;
}
+static int ivpu_hw_sched_init(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_jsm_hws_setup_priority_bands(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
/**
* ivpu_boot() - Start VPU firmware
* @vdev: VPU device
@@ -334,8 +380,10 @@ int ivpu_boot(struct ivpu_device *vdev)
{
int ret;
- /* Update boot params located at first 4KB of FW memory */
- ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
+ drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+
+ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
@@ -346,23 +394,44 @@ int ivpu_boot(struct ivpu_device *vdev)
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_mmu_evtq_dump(vdev);
- ivpu_fw_log_dump(vdev);
- return ret;
+ goto err_diagnose_failure;
}
ivpu_hw_irq_clear(vdev);
enable_irq(vdev->irq);
ivpu_hw_irq_enable(vdev);
ivpu_ipc_enable(vdev);
+
+ if (ivpu_fw_is_cold_boot(vdev)) {
+ ret = ivpu_pm_dct_init(vdev);
+ if (ret)
+ goto err_disable_ipc;
+
+ ret = ivpu_hw_sched_init(vdev);
+ if (ret)
+ goto err_disable_ipc;
+ }
+
return 0;
+
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
+err_diagnose_failure:
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_mmu_evtq_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ return ret;
}
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
+ flush_work(&vdev->irq_ipc_work);
+ flush_work(&vdev->irq_dct_work);
+ flush_work(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
@@ -386,6 +455,9 @@ int ivpu_shutdown(struct ivpu_device *vdev)
static const struct file_operations ivpu_fops = {
.owner = THIS_MODULE,
DRM_ACCEL_FOPS,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_fdinfo,
+#endif
};
static const struct drm_driver driver = {
@@ -395,25 +467,20 @@ static const struct drm_driver driver = {
.postclose = ivpu_postclose,
.gem_create_object = ivpu_gem_create_object,
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+ .gem_prime_import = ivpu_gem_prime_import,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
.fops = &ivpu_fops,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_memory_stats,
+#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRM_IVPU_DRIVER_MAJOR,
- .minor = DRM_IVPU_DRIVER_MINOR,
-};
-
-static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
-{
- struct ivpu_device *vdev = arg;
- return ivpu_ipc_irq_thread_handler(vdev);
-}
+ .major = 1,
+};
static int ivpu_irq_init(struct ivpu_device *vdev)
{
@@ -426,10 +493,16 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
return ret;
}
+ INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
+ INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
+ INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);
+
+ ivpu_irq_handlers_init(vdev);
+
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
- ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -505,28 +578,32 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm)
return -ENOMEM;
- if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
- vdev->hw->ops = &ivpu_hw_40xx_ops;
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
vdev->hw->dma_bits = 48;
- } else {
- vdev->hw->ops = &ivpu_hw_37xx_ops;
+ else
vdev->hw->dma_bits = 38;
- }
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
+ atomic_set(&vdev->job_timeout_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
- lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
+ vdev->db_limit.min = IVPU_MIN_DB;
+ vdev->db_limit.max = IVPU_MAX_DB;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
if (ret)
goto err_xa_destroy;
+ ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
@@ -540,7 +617,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
goto err_xa_destroy;
/* Init basic HW info based on buttress registers which are accessible before power up */
- ret = ivpu_hw_info_init(vdev);
+ ret = ivpu_hw_init(vdev);
if (ret)
goto err_xa_destroy;
@@ -549,9 +626,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_shutdown;
- ret = ivpu_mmu_global_context_init(vdev);
- if (ret)
- goto err_shutdown;
+ ivpu_mmu_global_context_init(vdev);
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -612,13 +687,14 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
+ ivpu_jobs_abort_all(vdev);
+ ivpu_pm_disable_recovery(vdev);
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
- ivpu_jobs_abort_all(vdev);
+ ivpu_ms_cleanup_all(vdev);
ivpu_job_done_consumer_fini(vdev);
- ivpu_pm_cancel_recovery(vdev);
ivpu_bo_unbind_all_user_contexts(vdev);
ivpu_ipc_fini(vdev);
@@ -638,6 +714,9 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
@@ -658,6 +737,7 @@ static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
ivpu_debugfs_init(vdev);
+ ivpu_sysfs_init(vdev);
ret = drm_dev_register(&vdev->drm, 0);
if (ret) {
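A structural point in the ivpu_drv.c changes above: the threaded IRQ handler is replaced by a plain hard-IRQ handler plus dedicated work items (irq_ipc_work, irq_dct_work, context_abort_work), and ivpu_prepare_for_reset() now flushes those works before disabling IPC so no bottom half races a reset. A generic sketch of the hard-IRQ-plus-workqueue split; all names are illustrative:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct ipc_work;
};

static irqreturn_t demo_irq_handler(int irq, void *arg)
{
	struct demo_dev *d = arg;

	/* clear/ack hardware status here, then defer the heavy work */
	schedule_work(&d->ipc_work);

	return IRQ_HANDLED;
}

static void demo_ipc_work_fn(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, ipc_work);

	/* process queued IPC messages in sleepable process context */
	(void)d;
}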
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index bb4374d0eaec..5b34b6f50e69 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -21,14 +21,23 @@
#define DRIVER_NAME "intel_vpu"
#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
-#define DRIVER_DATE "20230117"
-#define PCI_DEVICE_ID_MTL 0x7d1d
-#define PCI_DEVICE_ID_ARL 0xad1d
-#define PCI_DEVICE_ID_LNL 0x643e
+#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_ARL 0xad1d
+#define PCI_DEVICE_ID_LNL 0x643e
+#define PCI_DEVICE_ID_PTL_P 0xb03e
+#define PCI_DEVICE_ID_WCL 0xfd3e
+#define PCI_DEVICE_ID_NVL 0xd71d
-#define IVPU_HW_37XX 37
-#define IVPU_HW_40XX 40
+#define IVPU_HW_IP_37XX 37
+#define IVPU_HW_IP_40XX 40
+#define IVPU_HW_IP_50XX 50
+#define IVPU_HW_IP_60XX 60
+
+#define IVPU_HW_IP_REV_LNL_B0 4
+
+#define IVPU_HW_BTRS_MTL 1
+#define IVPU_HW_BTRS_LNL 2
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent reserved context */
@@ -39,13 +48,23 @@
#define IVPU_MIN_DB 1
#define IVPU_MAX_DB 255
-#define IVPU_NUM_ENGINES 2
+#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
+#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+
+#define IVPU_NUM_PRIORITIES 4
+#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
+
+#define IVPU_CMDQ_MIN_ID 1
+#define IVPU_CMDQ_MAX_ID 255
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
#define IVPU_PLATFORM_FPGA 3
+#define IVPU_PLATFORM_HSLE 4
#define IVPU_PLATFORM_INVALID 8
+#define IVPU_SCHED_MODE_AUTO -1
+
#define IVPU_DBG_REG BIT(0)
#define IVPU_DBG_IRQ BIT(1)
#define IVPU_DBG_MMU BIT(2)
@@ -60,6 +79,7 @@
#define IVPU_DBG_KREF BIT(11)
#define IVPU_DBG_RPM BIT(12)
#define IVPU_DBG_MMU_MAP BIT(13)
+#define IVPU_DBG_IOCTL BIT(14)
#define ivpu_err(vdev, fmt, ...) \
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -93,6 +113,8 @@ struct ivpu_wa_table {
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;
+ bool wp0_during_power_up;
+ bool disable_d0i2;
};
struct ivpu_hw_info;
@@ -122,22 +144,34 @@ struct ivpu_device {
struct xa_limit context_xa_limit;
struct xarray db_xa;
+ struct xa_limit db_limit;
+ u32 db_next;
+
+ struct work_struct irq_ipc_work;
+ struct work_struct irq_dct_work;
+ struct work_struct context_abort_work;
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
+ struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
+ atomic_t job_timeout_counter;
atomic64_t unique_id_counter;
+ ktime_t busy_start_ts;
+ ktime_t busy_time;
+
struct {
int boot;
int jsm;
int tdr;
- int reschedule_suspend;
+ int inference;
int autosuspend;
int d0i3_entry_msg;
+ int state_dump_msg;
} timeout;
};
@@ -149,22 +183,39 @@ struct ivpu_file_priv {
struct kref ref;
struct ivpu_device *vdev;
struct mutex lock; /* Protects cmdq */
- struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
+ struct xarray cmdq_xa;
struct ivpu_mmu_context ctx;
+ struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
+ struct list_head ms_instance_list;
+ struct ivpu_bo *ms_info_bo;
+ struct xa_limit job_limit;
+ u32 job_id_next;
+ struct xa_limit cmdq_limit;
+ u32 cmdq_id_next;
bool has_mmu_faults;
bool bound;
+ bool aborted;
};
extern int ivpu_dbg_mask;
extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
+extern int ivpu_sched_mode;
extern bool ivpu_disable_mmu_cont_pages;
+extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_FW_TEST BIT(0)
#define IVPU_TEST_MODE_NULL_HW BIT(1)
#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
+#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
+#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
+#define IVPU_TEST_MODE_TURBO_ENABLE BIT(9)
+#define IVPU_TEST_MODE_TURBO_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(11)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(12)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(13)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -173,6 +224,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
@@ -184,16 +236,40 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device;
}
-static inline int ivpu_hw_gen(struct ivpu_device *vdev)
+static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
+{
+ switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_MTL:
+ case PCI_DEVICE_ID_ARL:
+ return IVPU_HW_IP_37XX;
+ case PCI_DEVICE_ID_LNL:
+ return IVPU_HW_IP_40XX;
+ case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
+ return IVPU_HW_IP_50XX;
+ case PCI_DEVICE_ID_NVL:
+ return IVPU_HW_IP_60XX;
+ default:
+ dump_stack();
+ ivpu_err(vdev, "Unknown NPU IP generation\n");
+ return 0;
+ }
+}
+
+static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
case PCI_DEVICE_ID_ARL:
- return IVPU_HW_37XX;
+ return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
- return IVPU_HW_40XX;
+ case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
+ case PCI_DEVICE_ID_NVL:
+ return IVPU_HW_BTRS_LNL;
default:
- ivpu_err(vdev, "Unknown NPU device\n");
+ dump_stack();
+ ivpu_err(vdev, "Unknown buttress generation\n");
return 0;
}
}
@@ -228,7 +304,13 @@ static inline bool ivpu_is_simics(struct ivpu_device *vdev)
static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
{
- return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA ||
+ ivpu_get_platform(vdev) == IVPU_PLATFORM_HSLE;
+}
+
+static inline bool ivpu_is_force_snoop_enabled(struct ivpu_device *vdev)
+{
+ return ivpu_force_snoop;
}
#endif /* __IVPU_DRV_H__ */
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 1457300828bf..48386d2cddbb 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -17,16 +17,10 @@
#include "ivpu_ipc.h"
#include "ivpu_pm.h"
-#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
-#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
-#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
-#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
-#define FW_RUNTIME_MAX_SIZE SZ_512M
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
-#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
-#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
-#define FW_VERSION_HEADER_SIZE SZ_4K
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K
+#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M
#define WATCHDOG_MSS_REDIRECT 32
#define WATCHDOG_NCE_REDIRECT 33
@@ -44,20 +38,33 @@
#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \
ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor)
+#define IVPU_FOCUS_PRESENT_TIMER_MS 1000
+
static char *ivpu_firmware;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
+#endif
static struct {
int gen;
const char *name;
} fw_names[] = {
- { IVPU_HW_37XX, "vpu_37xx.bin" },
- { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
- { IVPU_HW_40XX, "vpu_40xx.bin" },
- { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+ { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
+ { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+ { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
+ { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
+ { IVPU_HW_IP_60XX, "intel/vpu/vpu_60xx_v1.bin" },
};
+/* Production fw_names from the table above */
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_60xx_v1.bin");
+
static int ivpu_fw_request(struct ivpu_device *vdev)
{
int ret = -ENOENT;
@@ -71,7 +78,7 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
}
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
- if (fw_names[i].gen != ivpu_hw_gen(vdev))
+ if (fw_names[i].gen != ivpu_hw_ip_gen(vdev))
continue;
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
@@ -121,11 +128,87 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
return false;
}
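+/*
+ * Check that [addr, addr + size) lies entirely inside @range, treating
+ * range->end as exclusive and rejecting address wrap-around via
+ * check_add_overflow().
+ */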
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range)
+{
+ u64 addr_end;
+
+ if (!range || check_add_overflow(addr, size, &addr_end))
+ return false;
+
+ if (addr < range->start || addr_end > range->end)
+ return false;
+
+ return true;
+}
+
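+/*
+ * Select the scheduling mode: honour the module parameter unless it is
+ * IVPU_SCHED_MODE_AUTO, in which case HW scheduling is chosen when the
+ * firmware JSM API is at least 3.24. OS mode is rejected on 60XX and newer
+ * IP with a warning and HW mode is used instead.
+ */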
+static u32
+ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX &&
+ ivpu_sched_mode == VPU_SCHEDULING_MODE_OS) {
+ ivpu_warn(vdev, "OS sched mode is not supported, using HW mode\n");
+ return VPU_SCHEDULING_MODE_HW;
+ }
+
+ if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
+ return ivpu_sched_mode;
+
+ if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, JSM, 3, 24))
+ return VPU_SCHEDULING_MODE_OS;
+
+ return VPU_SCHEDULING_MODE_HW;
+}
+
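+/*
+ * Validate the preemption buffer sizes advertised in the firmware header
+ * and, when the HW scheduler is active and mid-inference preemption is not
+ * disabled via test mode, store them page-aligned for later allocation.
+ */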
+static void
+ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 primary_preempt_buf_size, secondary_preempt_buf_size;
+
+ if (fw_hdr->preemption_buffer_1_max_size)
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n",
+ primary_preempt_buf_size, secondary_preempt_buf_size);
+
+ if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE ||
+ secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) {
+		ivpu_warn(vdev, "Preemption buffer sizes too small\n");
+ return;
+ }
+
+ if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE ||
+ secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) {
+		ivpu_warn(vdev, "Preemption buffer sizes too large\n");
+ return;
+ }
+
+ if (fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ return;
+
+ vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
+ vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
- u64 runtime_addr, image_load_addr, runtime_size, image_size;
+ struct ivpu_addr_range fw_image_range;
+ u64 boot_params_addr, boot_params_size;
+ u64 fw_version_addr, fw_version_size;
+ u64 runtime_addr, runtime_size;
+ u64 image_load_addr, image_size;
if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
@@ -137,18 +220,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- runtime_addr = fw_hdr->boot_params_load_address;
- runtime_size = fw_hdr->runtime_size;
- image_load_addr = fw_hdr->image_load_address;
- image_size = fw_hdr->image_size;
+ boot_params_addr = fw_hdr->boot_params_load_address;
+ boot_params_size = SZ_4K;
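+	/*
+	 * The boot-params and firmware-version pages occupy their own 4K
+	 * slots inside the runtime region; their sizes are subtracted from
+	 * runtime_size below so the runtime buffer covers only the image.
+	 */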
- if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
- ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
+ if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr);
return -EINVAL;
}
- if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
- ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
+ fw_version_addr = fw_hdr->firmware_version_load_address;
+ fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K);
+
+ if (fw_version_size != SZ_4K) {
+ ivpu_err(vdev, "Invalid firmware version size: %u\n",
+ fw_hdr->firmware_version_size);
+ return -EINVAL;
+ }
+
+ if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr);
+ return -EINVAL;
+ }
+
+ runtime_addr = fw_hdr->image_load_address;
+ runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size;
+
+ image_load_addr = fw_hdr->image_load_address;
+ image_size = fw_hdr->image_size;
+
+ if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n",
+ runtime_addr, runtime_size);
return -EINVAL;
}
@@ -157,34 +259,42 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- if (image_load_addr < runtime_addr ||
- image_load_addr + image_size > runtime_addr + runtime_size) {
- ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
+ if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n",
image_load_addr, image_size);
return -EINVAL;
}
- if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
- ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size))
return -EINVAL;
- }
- if (fw_hdr->entry_point < image_load_addr ||
- fw_hdr->entry_point >= image_load_addr + image_size) {
+ if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) {
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
return -EINVAL;
}
+
+ if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
+ ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ return -EINVAL;
+ }
+
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
- ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
- (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
+ if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE))
+ ivpu_warn(vdev, "Missing firmware version\n");
+
+ ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version);
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
return -EINVAL;
+ fw->boot_params_addr = boot_params_addr;
+ fw->boot_params_size = boot_params_size;
+ fw->fw_version_addr = fw_version_addr;
+ fw->fw_version_size = fw_version_size;
fw->runtime_addr = runtime_addr;
fw->runtime_size = runtime_size;
fw->image_load_offset = image_load_addr - runtime_addr;
@@ -194,16 +304,42 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
- fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
+ fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
fw->dvfs_mode = 0;
- ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
- fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
- ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
- fw->runtime_addr, image_load_addr, fw->entry_point);
+ fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
+ ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
+
+ ivpu_preemption_config_parse(vdev, fw_hdr);
+ ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n",
+ ivpu_fw_preempt_buf_size(vdev) ? "is" : "is not");
+
+ if (fw_hdr->ro_section_start_address &&
+ !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size,
+ &fw_image_range)) {
+ ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
+ fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
+ return -EINVAL;
+ }
+
+ fw->read_only_addr = fw_hdr->ro_section_start_address;
+ fw->read_only_size = fw_hdr->ro_section_size;
+
+ ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n",
+ fw->boot_params_addr, fw->boot_params_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n",
+ fw->fw_version_addr, fw->fw_version_size);
+ ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n",
+ fw->runtime_addr, fw->runtime_size);
+ ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n",
+ fw->image_load_offset, fw->image_size);
+ ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
+ fw->read_only_addr, fw->read_only_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size);
return 0;
}
@@ -230,39 +366,40 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
IVPU_PRINT_WA(disable_d0i3_msg);
}
-static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
-{
- struct ivpu_fw_info *fw = vdev->fw;
- u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
- u64 size = FW_SHARED_MEM_SIZE;
-
- if (start + size > FW_GLOBAL_MEM_END) {
- ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
- return -EINVAL;
- }
-
- ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
- return 0;
-}
-
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct ivpu_addr_range fw_range;
int log_verb_size;
int ret;
- ret = ivpu_fw_update_global_range(vdev);
- if (ret)
- return ret;
+ fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_bp) {
+ ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n");
+ return -ENOMEM;
+ }
- fw_range.start = fw->runtime_addr;
- fw_range.end = fw->runtime_addr + fw->runtime_size;
- fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
- DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_fw_ver) {
+ ivpu_err(vdev, "Failed to create firmware version memory buffer\n");
+ ret = -ENOMEM;
+ goto err_free_bp;
+ }
+
+ fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem) {
ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_fw_ver;
+ }
+
+ ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
+ fw->read_only_size);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set firmware image read-only\n");
+ goto err_free_fw_mem;
}
fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
@@ -273,7 +410,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
goto err_free_fw_mem;
}
- if (ivpu_log_level <= IVPU_FW_LOG_INFO)
+ if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
@@ -304,6 +441,10 @@ err_free_log_crit:
ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
ivpu_bo_free(fw->mem);
+err_free_fw_ver:
+ ivpu_bo_free(fw->mem_fw_ver);
+err_free_bp:
+ ivpu_bo_free(fw->mem_bp);
return ret;
}
@@ -319,10 +460,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
ivpu_bo_free(fw->mem_log_verb);
ivpu_bo_free(fw->mem_log_crit);
ivpu_bo_free(fw->mem);
+ ivpu_bo_free(fw->mem_fw_ver);
+ ivpu_bo_free(fw->mem_bp);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
fw->mem = NULL;
+ fw->mem_fw_ver = NULL;
+ fw->mem_bp = NULL;
}
int ivpu_fw_init(struct ivpu_device *vdev)
@@ -415,11 +560,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
- boot_params->global_memory_allocator_base);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
- boot_params->global_memory_allocator_size);
-
ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
boot_params->shave_nn_fw_base);
@@ -427,10 +567,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->watchdog_irq_mss);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
boot_params->watchdog_irq_nce);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
- boot_params->host_to_vpu_irq);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
- boot_params->job_done_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
boot_params->host_version_id);
@@ -464,6 +600,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->punit_telemetry_sram_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
boot_params->vpu_telemetry_enable);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_scheduling_mode = 0x%x\n",
+ boot_params->vpu_scheduling_mode);
ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n",
boot_params->dvfs_mode);
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n",
@@ -474,6 +612,10 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
+ boot_params->power_profile);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n",
+ boot_params->vpu_uses_ecc_mca_signal);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -500,11 +642,11 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
return;
}
+ memset(boot_params, 0, sizeof(*boot_params));
vdev->pm->is_warmboot = false;
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
- boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
/*
* This param is a debug firmware feature. It switches default clock
@@ -527,8 +669,10 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
- boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
- boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+ boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+ }
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@@ -561,14 +705,21 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
boot_params->verbose_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_verb);
- boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
- boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
- boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+ boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
+ boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
+ boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
+ boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
boot_params->dvfs_mode = vdev->fw->dvfs_mode;
if (!IVPU_WA(disable_d0i3_msg))
boot_params->d0i3_delayed_entry = 1;
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
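+	/* With the D0i2 WA, request a power profile with D0i2 kept disabled (bit meaning assumed) */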
+ if (IVPU_WA(disable_d0i2))
+ boot_params->power_profile |= BIT(1);
+ boot_params->vpu_uses_ecc_mca_signal =
+ ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0;
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 66b60fa161b5..00945892b55e 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,11 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_FW_H__
#define __IVPU_FW_H__
+#include "vpu_jsm_api.h"
+
+#define FW_VERSION_HEADER_SIZE SZ_4K
+#define FW_VERSION_STR_SIZE SZ_256
+
struct ivpu_device;
struct ivpu_bo;
struct vpu_boot_params;
@@ -13,10 +18,17 @@ struct vpu_boot_params;
struct ivpu_fw_info {
const struct firmware *file;
const char *name;
+ char version[FW_VERSION_STR_SIZE];
+ struct ivpu_bo *mem_bp;
+ struct ivpu_bo *mem_fw_ver;
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
struct ivpu_bo *mem_log_verb;
+ u64 boot_params_addr;
+ u64 boot_params_size;
+ u64 fw_version_addr;
+ u64 fw_version_size;
u64 runtime_addr;
u32 runtime_size;
u64 image_load_offset;
@@ -28,16 +40,28 @@ struct ivpu_fw_info {
u32 trace_destination_mask;
u64 trace_hw_component_mask;
u32 dvfs_mode;
+ u32 primary_preempt_buf_size;
+ u32 secondary_preempt_buf_size;
+ u64 read_only_addr;
+ u32 read_only_size;
+ u32 sched_mode;
+ u64 last_heartbeat;
};
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range);
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
-void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params);
static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
{
return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
}
+static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev)
+{
+ return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size;
+}
+
#endif /* __IVPU_FW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
index ef0adb5e0fbe..337c906b0210 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.c
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/ctype.h>
@@ -15,19 +15,19 @@
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
-#define IVPU_FW_LOG_LINE_LENGTH 256
+#define IVPU_FW_LOG_LINE_LENGTH 256
-unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
-module_param(ivpu_log_level, uint, 0444);
-MODULE_PARM_DESC(ivpu_log_level,
- "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+unsigned int ivpu_fw_log_level = IVPU_FW_LOG_ERROR;
+module_param_named(fw_log_level, ivpu_fw_log_level, uint, 0444);
+MODULE_PARM_DESC(fw_log_level,
+ "NPU firmware default log level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
" fatal=" __stringify(IVPU_FW_LOG_FATAL));
-static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
- struct vpu_tracing_buffer_header **log_header)
+static int fw_log_from_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+ struct vpu_tracing_buffer_header **out_log)
{
struct vpu_tracing_buffer_header *log;
@@ -48,7 +48,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return -EINVAL;
}
- *log_header = log;
+ *out_log = log;
*offset += log->size;
ivpu_dbg(vdev, FW_BOOT,
@@ -59,7 +59,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return 0;
}
-static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+static void fw_log_print_lines(char *buffer, u32 size, struct drm_printer *p)
{
char line[IVPU_FW_LOG_LINE_LENGTH];
u32 index = 0;
@@ -87,56 +87,89 @@ static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
}
line[index] = 0;
if (index != 0)
- drm_printf(p, "%s\n", line);
+ drm_printf(p, "%s", line);
}
-static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
- const char *prefix, bool only_new_msgs, struct drm_printer *p)
+static void fw_log_print_buffer(struct vpu_tracing_buffer_header *log, const char *prefix,
+ bool only_new_msgs, struct drm_printer *p)
{
- char *log_buffer = (void *)log + log->header_size;
- u32 log_size = log->size - log->header_size;
- u32 log_start = log->read_index;
- u32 log_end = log->write_index;
-
- if (!(log->write_index || log->wrap_count) ||
- (log->write_index == log->read_index && only_new_msgs)) {
- drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
- return;
+ char *log_data = (void *)log + log->header_size;
+ u32 data_size = log->size - log->header_size;
+ u32 log_start = only_new_msgs ? READ_ONCE(log->read_index) : 0;
+ u32 log_end = READ_ONCE(log->write_index);
+
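+	/*
+	 * The trace buffer is a ring. With exactly one writer wrap since the
+	 * last read, anything before write_index has been overwritten; after
+	 * more than one wrap the whole buffer is new data and the oldest byte
+	 * sits at write_index.
+	 */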
+ if (log->wrap_count == log->read_wrap_count) {
+ if (log_end <= log_start) {
+ drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+ return;
+ }
+ } else if (log->wrap_count == log->read_wrap_count + 1) {
+ if (log_end > log_start)
+ log_start = log_end;
+ } else {
+ log_start = log_end;
}
drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
- if (log->write_index > log->read_index) {
- buffer_print(log_buffer + log_start, log_end - log_start, p);
+ if (log_end > log_start) {
+ fw_log_print_lines(log_data + log_start, log_end - log_start, p);
} else {
- buffer_print(log_buffer + log_end, log_size - log_end, p);
- buffer_print(log_buffer, log_end, p);
+ fw_log_print_lines(log_data + log_start, data_size - log_start, p);
+ fw_log_print_lines(log_data, log_end, p);
}
- drm_printf(p, "\x1b[0m");
+ drm_printf(p, "\n\x1b[0m"); /* add new line and clear formatting */
drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
}
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+static void
+fw_log_print_all_in_bo(struct ivpu_device *vdev, const char *name,
+ struct ivpu_bo *bo, bool only_new_msgs, struct drm_printer *p)
{
- struct vpu_tracing_buffer_header *log_header;
+ struct vpu_tracing_buffer_header *log;
u32 next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
+ while (fw_log_from_bo(vdev, bo, &next, &log) == 0)
+ fw_log_print_buffer(log, name, only_new_msgs, p);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+ fw_log_print_all_in_bo(vdev, "NPU critical", vdev->fw->mem_log_crit, only_new_msgs, p);
+ fw_log_print_all_in_bo(vdev, "NPU verbose", vdev->fw->mem_log_verb, only_new_msgs, p);
+}
+
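+/*
+ * Mark everything logged so far as consumed, so a later print with
+ * only_new_msgs set starts from the current writer position.
+ */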
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev)
+{
+ struct vpu_tracing_buffer_header *log;
+ u32 next;
+
+ next = 0;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+ log->read_index = READ_ONCE(log->write_index);
+ log->read_wrap_count = READ_ONCE(log->wrap_count);
+ }
next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+ log->read_index = READ_ONCE(log->write_index);
+ log->read_wrap_count = READ_ONCE(log->wrap_count);
+ }
}
-void ivpu_fw_log_clear(struct ivpu_device *vdev)
+void ivpu_fw_log_reset(struct ivpu_device *vdev)
{
- struct vpu_tracing_buffer_header *log_header;
- u32 next = 0;
+ struct vpu_tracing_buffer_header *log;
+ u32 next;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- log_header->read_index = log_header->write_index;
+ next = 0;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+ log->read_index = 0;
+ log->read_wrap_count = 0;
+ }
next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- log_header->read_index = log_header->write_index;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+ log->read_index = 0;
+ log->read_wrap_count = 0;
+ }
}
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
index 0b2573f6f315..8bb528a73cb7 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.h
+++ b/drivers/accel/ivpu/ivpu_fw_log.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_FW_LOG_H__
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/drm_print.h>
-
#include "ivpu_drv.h"
#define IVPU_FW_LOG_DEFAULT 0
@@ -19,20 +17,15 @@
#define IVPU_FW_LOG_ERROR 4
#define IVPU_FW_LOG_FATAL 5
-extern unsigned int ivpu_log_level;
-
#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE SZ_1M
#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE SZ_8M
#define IVPU_FW_CRITICAL_BUFFER_SIZE SZ_512K
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
-void ivpu_fw_log_clear(struct ivpu_device *vdev);
+extern unsigned int ivpu_fw_log_level;
-static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
-{
- struct drm_printer p = drm_info_printer(vdev->drm.dev);
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev);
+void ivpu_fw_log_reset(struct ivpu_device *vdev);
- ivpu_fw_log_print(vdev, false, &p);
-}
#endif /* __IVPU_FW_LOG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 1b409dbd332d..ece68f570b7e 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -15,50 +15,88 @@
#include <drm/drm_utils.h>
#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
ivpu_dbg(vdev, BO,
- "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+ "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
- (bool)bo->base.base.import_attach);
+ (bool)drm_gem_is_imported(&bo->base.base));
+}
+
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+ return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+ dma_resv_unlock(bo->base.base.resv);
+}
+
+static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo)
+{
+ struct sg_table *sgt;
+
+ drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);
+
+ ivpu_bo_lock(bo);
+
+ sgt = bo->base.sgt;
+ if (!sgt) {
+ sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
+ else
+ bo->base.sgt = sgt;
+ }
+
+ ivpu_bo_unlock(bo);
+
+ return sgt;
}
/*
- * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ * ivpu_bo_bind() - pin the backing physical pages and map them to VPU.
*
* This function pins physical memory pages, then maps the physical pages
* to IOMMU address space and finally updates the VPU MMU page tables
* to allow the VPU to translate VPU address to IOMMU address.
*/
-int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
int ret = 0;
- mutex_lock(&bo->lock);
+ ivpu_dbg_bo(vdev, bo, "bind");
- ivpu_dbg_bo(vdev, bo, "pin");
- drm_WARN_ON(&vdev->drm, !bo->ctx);
+ if (bo->base.base.import_attach)
+ sgt = ivpu_bo_map_attachment(vdev, bo);
+ else
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+ return ret;
+ }
- if (!bo->mmu_mapped) {
- struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
-
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
- goto unlock;
- }
+ ivpu_bo_lock(bo);
+ if (!bo->mmu_mapped) {
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
- ivpu_bo_is_snooped(bo));
+ ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
goto unlock;
@@ -67,7 +105,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
}
unlock:
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
return ret;
}
@@ -82,19 +120,17 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
if (!drm_dev_enter(&vdev->drm, &idx))
return -ENODEV;
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
+ bo->ctx_id = ctx->id;
bo->vpu_addr = bo->mm_node.start;
- } else {
- ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ ivpu_dbg_bo(vdev, bo, "vaddr");
}
- ivpu_dbg_bo(vdev, bo, "alloc");
-
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
drm_dev_exit(idx);
@@ -105,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+ dma_resv_assert_held(bo->base.base.resv);
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -120,17 +156,17 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (bo->base.base.import_attach)
- return;
-
- dma_resv_lock(bo->base.base.resv, NULL);
if (bo->base.sgt) {
- dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->base.sgt);
- kfree(bo->base.sgt);
+ if (bo->base.base.import_attach) {
+ dma_buf_unmap_attachment(bo->base.base.import_attach,
+ bo->base.sgt, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ }
bo->base.sgt = NULL;
}
- dma_resv_unlock(bo->base.base.resv);
}
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -142,12 +178,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
if (bo->ctx == ctx) {
ivpu_dbg_bo(vdev, bo, "unbind");
ivpu_bo_unbind_locked(bo);
}
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
}
mutex_unlock(&vdev->bo_list_lock);
}
@@ -167,11 +203,52 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
INIT_LIST_HEAD(&bo->bo_list_node);
- mutex_init(&bo->lock);
return &bo->base.base;
}
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct device *attach_dev = dev->dev;
+ struct dma_buf_attachment *attach;
+ struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
+ int ret;
+
+ attach = dma_buf_attach(dma_buf, attach_dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
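+	/* Hold a dma-buf reference for the lifetime of the imported GEM object */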
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_detach;
+ }
+
+ obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
+
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
+ return obj;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
@@ -197,6 +274,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
+ ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
+
return bo;
}
@@ -208,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
struct ivpu_addr_range *range;
if (bo->ctx) {
- ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
- file_priv->ctx.id, bo->ctx->id);
+ ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n",
+ bo, file_priv->ctx.id, bo->ctx->id);
return -EALREADY;
}
@@ -230,19 +309,41 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_dbg_bo(vdev, bo, "free");
+ drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node));
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
- mutex_unlock(&vdev->bo_list_lock);
- drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
+ !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
+ drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+ ivpu_bo_lock(bo);
ivpu_bo_unbind_locked(bo);
- mutex_destroy(&bo->lock);
+ ivpu_bo_unlock(bo);
+
+ mutex_unlock(&vdev->bo_list_lock);
- drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
+ drm_WARN_ON(&vdev->drm, bo->ctx);
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+ drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
drm_gem_shmem_free(&bo->base);
}
+static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (ivpu_bo_is_resident(bo))
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_gem_bo_free,
.open = ivpu_gem_bo_open,
@@ -253,6 +354,7 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = ivpu_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
@@ -265,22 +367,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
struct ivpu_bo *bo;
int ret;
- if (args->flags & ~DRM_IVPU_BO_FLAGS)
+ if (args->flags & ~DRM_IVPU_BO_FLAGS) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags);
return -EINVAL;
+ }
- if (size == 0)
+ if (size == 0) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size);
return -EINVAL;
+ }
bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0);
+
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (!ret)
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ } else {
args->vpu_addr = bo->vpu_addr;
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1);
+ }
drm_gem_object_put(&bo->base.base);
@@ -304,23 +417,26 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n",
bo, range->start, size, flags);
return NULL;
}
ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
- if (ret)
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n",
+ bo, ctx->id, size, ret);
goto err_put;
+ }
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_bind(bo);
if (ret)
goto err_put;
if (flags & DRM_IVPU_BO_MAPPABLE) {
- dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ ivpu_bo_lock(bo);
+ ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
+ ivpu_bo_unlock(bo);
if (ret)
goto err_put;
@@ -333,6 +449,21 @@ err_put:
return NULL;
}
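+/*
+ * Create a global-context BO pinned at a fixed NPU address inside the
+ * runtime range; used for the firmware boot-params, version and runtime
+ * buffers.
+ */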
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags)
+{
+ struct ivpu_addr_range range;
+
+ if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size);
+ return NULL;
+ }
+
+ if (ivpu_hw_range_init(vdev, &range, addr, size))
+ return NULL;
+
+ return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
+}
+
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
@@ -343,9 +474,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
- dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ ivpu_bo_lock(bo);
+ drm_gem_shmem_vunmap_locked(&bo->base, &map);
+ ivpu_bo_unlock(bo);
}
drm_gem_object_put(&bo->base.base);
@@ -364,12 +495,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
bo = to_ivpu_bo(obj);
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
args->flags = bo->flags;
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
args->vpu_addr = bo->vpu_addr;
args->size = obj->size;
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
drm_gem_object_put(obj);
return ret;
@@ -384,6 +515,9 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ /* Add 1 jiffy to ensure the wait function never times out before intended timeout_ns */
+ timeout += 1;
+
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -EINVAL;
@@ -403,10 +537,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
- bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
@@ -415,12 +549,12 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
drm_printf(p, " imported");
drm_printf(p, "\n");
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
}
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index fb7117c13eec..0c3350f22b55 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__
@@ -17,25 +17,29 @@ struct ivpu_bo {
struct list_head bo_list_node;
struct drm_mm_node mm_node;
- struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
u32 flags;
u32 job_status; /* Valid only for command buffer */
+ u32 ctx_id;
bool mmu_mapped;
};
-int ivpu_bo_pin(struct ivpu_bo *bo);
+int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);
@@ -60,14 +64,27 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
return bo->flags & DRM_IVPU_BO_CACHE_MASK;
}
+static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+{
+ return to_ivpu_device(bo->base.base.dev);
+}
+
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
+ if (ivpu_is_force_snoop_enabled(ivpu_bo_to_vdev(bo)))
+ return true;
+
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
-static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo)
{
- return to_ivpu_device(bo->base.base.dev);
+ return bo->flags & DRM_IVPU_BO_READ_ONLY;
+}
+
+static inline bool ivpu_bo_is_resident(struct ivpu_bo *bo)
+{
+ return !!bo->base.pages;
}
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
@@ -92,4 +109,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}
+static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_MAPPABLE;
+}
+
#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem_userptr.c b/drivers/accel/ivpu/ivpu_gem_userptr.c
new file mode 100644
index 000000000000..25ba606164c0
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem_userptr.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2025 Intel Corporation
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+
+static struct sg_table *
+ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sgt = attachment->dmabuf->priv;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sgt;
+}
+
+static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct sg_table *sgt = dma_buf->priv;
+ struct sg_page_iter page_iter;
+ struct page *page;
+
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ page = sg_page_iter_page(&page_iter);
+ unpin_user_page(page);
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = {
+ .map_dma_buf = ivpu_gem_userptr_dmabuf_map,
+ .unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap,
+ .release = ivpu_gem_userptr_dmabuf_release,
+};
+
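+/*
+ * Wrap long-term pinned user pages in a standalone dma-buf so the regular
+ * prime import path can map them for the NPU; the pages are unpinned when
+ * the dma-buf is released.
+ */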
+static struct dma_buf *
+ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
+ size_t size, uint32_t flags)
+{
+ struct dma_buf_export_info exp_info = {};
+ struct dma_buf *dma_buf;
+ struct sg_table *sgt;
+ struct page **pages;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned int gup_flags = FOLL_LONGTERM;
+ int ret, i, pinned;
+
+ /* Add FOLL_WRITE only if the BO is not read-only */
+ if (!(flags & DRM_IVPU_BO_READ_ONLY))
+ gup_flags |= FOLL_WRITE;
+
+ pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages);
+ if (pinned < 0) {
+ ret = pinned;
+ ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret);
+ goto free_pages_array;
+ }
+
+ if (pinned != nr_pages) {
+ ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto unpin_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL);
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret);
+ goto free_sgt;
+ }
+
+ exp_info.exp_name = "ivpu_userptr_dmabuf";
+ exp_info.owner = THIS_MODULE;
+ exp_info.ops = &ivpu_gem_userptr_dmabuf_ops;
+ exp_info.size = size;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+ exp_info.priv = sgt;
+
+ dma_buf = dma_buf_export(&exp_info);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret);
+ goto free_sg_table;
+ }
+
+ kvfree(pages);
+ return dma_buf;
+
+free_sg_table:
+ sg_free_table(sgt);
+free_sgt:
+ kfree(sgt);
+unpin_pages:
+ for (i = 0; i < pinned; i++)
+ unpin_user_page(pages[i]);
+free_pages_array:
+ kvfree(pages);
+ return ERR_PTR(ret);
+}
+
+static struct ivpu_bo *
+ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr,
+ size_t size, uint32_t flags)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
+
+ dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags);
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ obj = ivpu_gem_prime_import(&vdev->drm, dma_buf);
+ if (IS_ERR(obj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(obj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ bo = to_ivpu_bo(obj);
+ bo->flags = flags;
+
+ return bo;
+}
+
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_bo_create_from_userptr *args = data;
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ void __user *user_ptr = u64_to_user_ptr(args->user_ptr);
+ struct ivpu_bo *bo;
+ int ret;
+
+ if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (!args->user_ptr || !args->size) {
+		ivpu_dbg(vdev, IOCTL, "Userptr or size is zero: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EINVAL;
+ }
+
+ if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) {
+ ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_ptr, args->size)) {
+ ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EFAULT;
+ }
+
+ bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ } else {
+ ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",
+ args->handle, bo->vpu_addr, args->size, bo->flags);
+ args->vpu_addr = bo->vpu_addr;
+ }
+
+ drm_gem_object_put(&bo->base.base);
+
+ return ret;
+}
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
new file mode 100644
index 000000000000..d69cd0d93569
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_btrs.h"
+#include "ivpu_hw_ip.h"
+
+#include <asm/msr-index.h>
+#include <asm/msr.h>
+#include <linux/dmi.h>
+#include <linux/fault-inject.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(ivpu_hw_failure);
+
+static char *ivpu_fail_hw;
+module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
+MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
+#endif
+
+#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */
+
+#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff
+
+static char *platform_to_str(u32 platform)
+{
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ return "SILICON";
+ case IVPU_PLATFORM_SIMICS:
+ return "SIMICS";
+ case IVPU_PLATFORM_FPGA:
+ return "FPGA";
+ case IVPU_PLATFORM_HSLE:
+ return "HSLE";
+ default:
+ return "Invalid platform";
+ }
+}
+
+static void platform_init(struct ivpu_device *vdev)
+{
+ int platform = ivpu_hw_btrs_platform_read(vdev);
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);
+
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ case IVPU_PLATFORM_SIMICS:
+ case IVPU_PLATFORM_FPGA:
+ case IVPU_PLATFORM_HSLE:
+ vdev->platform = platform;
+ break;
+
+ default:
+ ivpu_err(vdev, "Invalid platform type: %d\n", platform);
+ break;
+ }
+}
+
+static void wa_init(struct ivpu_device *vdev)
+{
+ vdev->wa.punit_disabled = false;
+ vdev->wa.clear_runtime_mem = false;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);
+
+ if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
+ ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
+ vdev->wa.disable_clock_relinquish = true;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
+ vdev->wa.disable_clock_relinquish = false;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
+ vdev->wa.disable_clock_relinquish = true;
+
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ vdev->wa.wp0_during_power_up = true;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
+ vdev->wa.disable_d0i2 = true;
+
+ IVPU_PRINT_WA(punit_disabled);
+ IVPU_PRINT_WA(clear_runtime_mem);
+ IVPU_PRINT_WA(interrupt_clear_with_0);
+ IVPU_PRINT_WA(disable_clock_relinquish);
+ IVPU_PRINT_WA(wp0_during_power_up);
+ IVPU_PRINT_WA(disable_d0i2);
+}
+
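+/* Per-platform timeouts (milliseconds); -1 disables the given timeout */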
+static void timeouts_init(struct ivpu_device *vdev)
+{
+ if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
+ vdev->timeout.boot = -1;
+ vdev->timeout.jsm = -1;
+ vdev->timeout.tdr = -1;
+ vdev->timeout.inference = -1;
+ vdev->timeout.autosuspend = -1;
+ vdev->timeout.d0i3_entry_msg = -1;
+ vdev->timeout.state_dump_msg = -1;
+ } else if (ivpu_is_fpga(vdev)) {
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 15000;
+ vdev->timeout.tdr = 30000;
+ vdev->timeout.inference = 900000;
+ vdev->timeout.autosuspend = -1;
+ vdev->timeout.d0i3_entry_msg = 500;
+ vdev->timeout.state_dump_msg = 10000;
+ } else if (ivpu_is_simics(vdev)) {
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 10000;
+ vdev->timeout.inference = 300000;
+ vdev->timeout.autosuspend = 100;
+ vdev->timeout.d0i3_entry_msg = 100;
+ vdev->timeout.state_dump_msg = 10;
+ } else {
+ vdev->timeout.boot = 1000;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 2000;
+ vdev->timeout.inference = 60000;
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ vdev->timeout.autosuspend = 10;
+ else
+ vdev->timeout.autosuspend = 100;
+ vdev->timeout.d0i3_entry_msg = 5;
+ vdev->timeout.state_dump_msg = 100;
+ }
+}
+
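+/* Default hardware scheduler (HWS) settings per priority band; IDLE and REALTIME start with a zero grace period */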
+static void priority_bands_init(struct ivpu_device *vdev)
+{
+ /* Idle */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
+ /* Normal */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
+ /* Focus */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
+ /* Realtime */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
+}
+
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
+{
+ u64 end;
+
+ if (!range || check_add_overflow(start, size, &end)) {
+ ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size);
+ return -EINVAL;
+ }
+
+ range->start = start;
+ range->end = end;
+
+ return 0;
+}
+
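+/* The NPU virtual address layout differs between the 37XX IP and later generations */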
+static void memory_ranges_init(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G);
+ } else {
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
+ vdev->hw->ranges.dma = vdev->hw->ranges.user;
+ }
+
+ drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start,
+ FW_SHARED_MEM_ALIGNMENT));
+}
+
+static int wp_enable(struct ivpu_device *vdev)
+{
+ return ivpu_hw_btrs_wp_drive(vdev, true);
+}
+
+static int wp_disable(struct ivpu_device *vdev)
+{
+ return ivpu_hw_btrs_wp_drive(vdev, false);
+}
+
+int ivpu_hw_power_up(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(wp0_during_power_up)) {
+ /* WP requests may fail when powering down, so issue WP 0 here */
+ ret = wp_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
+ }
+
+ ret = ivpu_hw_btrs_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+ ret = wp_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
+ return ret;
+ }
+
+ if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
+ if (IVPU_WA(disable_clock_relinquish))
+ ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
+ ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
+ ivpu_hw_btrs_ats_print_lnl(vdev);
+ }
+
+ ret = ivpu_hw_ip_host_ss_configure(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_hw_ip_idle_gen_disable(vdev);
+
+ ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
+ return ret;
+ }
+
+ ret = ivpu_hw_ip_pwr_domain_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+ return ret;
+ }
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
+ ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);
+
+ ret = ivpu_hw_ip_top_noc_enable(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+ return ret;
+}
+
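+/* Capture matching host (boottime) and NPU (perf timer) timestamps at D0i3 entry */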
+static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
+{
+ vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
+ vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
+}
+
+int ivpu_hw_reset(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (ivpu_hw_btrs_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset NPU IP\n");
+ ret = -EIO;
+ }
+
+ if (wp_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable workpoint\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int ivpu_hw_power_down(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ save_d0i3_entry_timestamp(vdev);
+
+ if (!ivpu_hw_is_idle(vdev))
+ ivpu_warn(vdev, "NPU not idle during power down\n");
+
+ if (ivpu_hw_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset NPU\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_hw_btrs_d0i3_enable(vdev)) {
+ ivpu_err(vdev, "Failed to enter D0I3\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int ivpu_hw_init(struct ivpu_device *vdev)
+{
+ ivpu_hw_btrs_info_init(vdev);
+ ivpu_hw_btrs_freq_ratios_init(vdev);
+ priority_bands_init(vdev);
+ memory_ranges_init(vdev);
+ platform_init(vdev);
+ wa_init(vdev);
+ timeouts_init(vdev);
+ atomic_set(&vdev->hw->firewall_irq_counter, 0);
+
+#ifdef CONFIG_FAULT_INJECTION
+ if (ivpu_fail_hw)
+ setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
+#endif
+
+ return 0;
+}
+
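+/* Disable IO snooping, enable TBU/MMU translation, then start the SoC CPU that boots the firmware */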
+int ivpu_hw_boot_fw(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_hw_ip_snoop_disable(vdev);
+ ivpu_hw_ip_tbu_mmu_enable(vdev);
+ ret = ivpu_hw_ip_soc_cpu_boot(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
+
+ return ret;
+}
+
+void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+ return;
+ }
+
+ if (enable)
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
+ else
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+}
+
+void ivpu_irq_handlers_init(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
+ else
+ vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
+ else
+ vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
+}
+
+void ivpu_hw_irq_enable(struct ivpu_device *vdev)
+{
+ ivpu_hw_ip_irq_enable(vdev);
+ ivpu_hw_btrs_irq_enable(vdev);
+}
+
+void ivpu_hw_irq_disable(struct ivpu_device *vdev)
+{
+ ivpu_hw_btrs_irq_disable(vdev);
+ ivpu_hw_ip_irq_disable(vdev);
+}
+
+irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
+{
+ struct ivpu_device *vdev = ptr;
+ bool ip_handled, btrs_handled;
+
+ ivpu_hw_btrs_global_int_disable(vdev);
+
+ btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
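+ /* Skip the IP handler only when the NPU is idle and the buttress handler already consumed the IRQ */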
+ if (!ivpu_hw_is_idle(vdev) || !btrs_handled)
+ ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
+ else
+ ip_handled = false;
+
+ /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+ ivpu_hw_btrs_global_int_enable(vdev);
+
+ if (!ip_handled && !btrs_handled)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ return IRQ_HANDLED;
+}
+
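+/* On 50XX+ IP, ECC errors can be reported via MCA; the low bits of MSR_INTEGRITY_CAPS indicate whether that path is enabled */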
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev)
+{
+ unsigned long long msr_integrity_caps;
+ int ret;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return false;
+
+ ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps);
+ if (ret) {
+ ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret);
+ return false;
+ }
+
+ ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps);
+
+ return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK;
+}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 094c659d2800..b6d0f0d0dccc 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -1,39 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__
#include "ivpu_drv.h"
-
-struct ivpu_hw_ops {
- int (*info_init)(struct ivpu_device *vdev);
- int (*power_up)(struct ivpu_device *vdev);
- int (*boot_fw)(struct ivpu_device *vdev);
- int (*power_down)(struct ivpu_device *vdev);
- int (*reset)(struct ivpu_device *vdev);
- bool (*is_idle)(struct ivpu_device *vdev);
- int (*wait_for_idle)(struct ivpu_device *vdev);
- void (*wdt_disable)(struct ivpu_device *vdev);
- void (*diagnose_failure)(struct ivpu_device *vdev);
- u32 (*profiling_freq_get)(struct ivpu_device *vdev);
- void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
- u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
- u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
- u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
- u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
- u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
- void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id);
- u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev);
- u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev);
- void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr);
- void (*irq_clear)(struct ivpu_device *vdev);
- void (*irq_enable)(struct ivpu_device *vdev);
- void (*irq_disable)(struct ivpu_device *vdev);
- irqreturn_t (*irq_handler)(int irq, void *ptr);
-};
+#include "ivpu_hw_btrs.h"
+#include "ivpu_hw_ip.h"
struct ivpu_addr_range {
resource_size_t start;
@@ -41,8 +16,12 @@ struct ivpu_addr_range {
};
struct ivpu_hw_info {
- const struct ivpu_hw_ops *ops;
struct {
+ bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
+ bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
+ } irq;
+ struct {
+ struct ivpu_addr_range runtime;
struct ivpu_addr_range global;
struct ivpu_addr_range user;
struct ivpu_addr_range shave;
@@ -58,148 +37,118 @@ struct ivpu_hw_info {
u8 pn_ratio;
u32 profiling_freq;
} pll;
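+ /* Hardware scheduler (HWS) per-priority-band settings, populated by priority_bands_init() */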
+ struct {
+ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ } hws;
u32 tile_fuse;
u32 sku;
u16 config;
int dma_bits;
ktime_t d0i3_entry_host_ts;
u64 d0i3_entry_vpu_ts;
-};
-
-extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
-extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
-
-static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
-{
- return vdev->hw->ops->info_init(vdev);
-};
-
-static inline int ivpu_hw_power_up(struct ivpu_device *vdev)
-{
- ivpu_dbg(vdev, PM, "HW power up\n");
-
- return vdev->hw->ops->power_up(vdev);
-};
-
-static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev)
-{
- return vdev->hw->ops->boot_fw(vdev);
-};
+ atomic_t firewall_irq_counter;
+};
+
+int ivpu_hw_init(struct ivpu_device *vdev);
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start,
+ u64 size);
+int ivpu_hw_power_up(struct ivpu_device *vdev);
+int ivpu_hw_power_down(struct ivpu_device *vdev);
+int ivpu_hw_reset(struct ivpu_device *vdev);
+int ivpu_hw_boot_fw(struct ivpu_device *vdev);
+void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable);
+void ivpu_irq_handlers_init(struct ivpu_device *vdev);
+void ivpu_hw_irq_enable(struct ivpu_device *vdev);
+void ivpu_hw_irq_disable(struct ivpu_device *vdev);
+irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev);
+
+static inline bool ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
+{
+ return vdev->hw->irq.btrs_irq_handler(vdev, irq);
+}
-static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
+static inline bool ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
{
- return vdev->hw->ops->is_idle(vdev);
-};
+ return vdev->hw->irq.ip_irq_handler(vdev, irq);
+}
-static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
+static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
- return vdev->hw->ops->wait_for_idle(vdev);
-};
+ return range->end - range->start;
+}
-static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
{
- ivpu_dbg(vdev, PM, "HW power down\n");
-
- return vdev->hw->ops->power_down(vdev);
-};
+ return ivpu_hw_btrs_dpu_max_freq_get(vdev);
+}
-static inline int ivpu_hw_reset(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_dpu_freq_get(struct ivpu_device *vdev)
{
- ivpu_dbg(vdev, PM, "HW reset\n");
-
- return vdev->hw->ops->reset(vdev);
-};
+ return ivpu_hw_btrs_dpu_freq_get(vdev);
+}
-static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
+static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
- vdev->hw->ops->wdt_disable(vdev);
-};
+ ivpu_hw_ip_irq_clear(vdev);
+}
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
{
- return vdev->hw->ops->profiling_freq_get(vdev);
-};
-
-static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
-{
- return vdev->hw->ops->profiling_freq_drive(vdev, enable);
-};
-
-/* Register indirect accesses */
-static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
-{
- return vdev->hw->ops->reg_pll_freq_get(vdev);
-};
-
-static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
-{
- return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+ return vdev->hw->pll.profiling_freq;
}
-static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
-{
- return vdev->hw->ops->reg_telemetry_offset_get(vdev);
-};
-
-static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev)
-{
- return vdev->hw->ops->reg_telemetry_size_get(vdev);
-};
-
-static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev)
-{
- return vdev->hw->ops->reg_telemetry_enable_get(vdev);
-};
-
-static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
{
- vdev->hw->ops->reg_db_set(vdev, db_id);
-};
+ ivpu_hw_ip_diagnose_failure(vdev);
+ ivpu_hw_btrs_diagnose_failure(vdev);
+}
-static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_telemetry_offset_get(struct ivpu_device *vdev)
{
- return vdev->hw->ops->reg_ipc_rx_addr_get(vdev);
-};
+ return ivpu_hw_btrs_telemetry_offset_get(vdev);
+}
-static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_telemetry_size_get(struct ivpu_device *vdev)
{
- return vdev->hw->ops->reg_ipc_rx_count_get(vdev);
-};
+ return ivpu_hw_btrs_telemetry_size_get(vdev);
+}
-static inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+static inline u32 ivpu_hw_telemetry_enable_get(struct ivpu_device *vdev)
{
- vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr);
-};
+ return ivpu_hw_btrs_telemetry_enable_get(vdev);
+}
-static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
{
- vdev->hw->ops->irq_clear(vdev);
-};
+ return ivpu_hw_btrs_is_idle(vdev);
+}
-static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev)
+static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
{
- vdev->hw->ops->irq_enable(vdev);
-};
+ return ivpu_hw_btrs_wait_for_idle(vdev);
+}
-static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev)
+static inline void ivpu_hw_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
- vdev->hw->ops->irq_disable(vdev);
-};
+ ivpu_hw_ip_ipc_tx_set(vdev, vpu_addr);
+}
-static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size)
+static inline void ivpu_hw_db_set(struct ivpu_device *vdev, u32 db_id)
{
- range->start = start;
- range->end = start + size;
+ ivpu_hw_ip_db_set(vdev, db_id);
}
-static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
+static inline u32 ivpu_hw_ipc_rx_addr_get(struct ivpu_device *vdev)
{
- return range->end - range->start;
+ return ivpu_hw_ip_ipc_rx_addr_get(vdev);
}
-static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_ipc_rx_count_get(struct ivpu_device *vdev)
{
- vdev->hw->ops->diagnose_failure(vdev);
+ return ivpu_hw_ip_ipc_rx_count_get(vdev);
}
#endif /* __IVPU_HW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
deleted file mode 100644
index bd25e2d9fb0f..000000000000
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ /dev/null
@@ -1,1065 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2020-2024 Intel Corporation
- */
-
-#include "ivpu_drv.h"
-#include "ivpu_fw.h"
-#include "ivpu_hw_37xx_reg.h"
-#include "ivpu_hw_reg_io.h"
-#include "ivpu_hw.h"
-#include "ivpu_ipc.h"
-#include "ivpu_mmu.h"
-#include "ivpu_pm.h"
-
-#define TILE_FUSE_ENABLE_BOTH 0x0
-#define TILE_SKU_BOTH 0x3630
-
-/* Work point configuration values */
-#define CONFIG_1_TILE 0x01
-#define CONFIG_2_TILE 0x02
-#define PLL_RATIO_5_3 0x01
-#define PLL_RATIO_4_3 0x02
-#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
-#define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
-#define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
-#define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
-#define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
-#define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
-
-#define PLL_REF_CLK_FREQ (50 * 1000000)
-#define PLL_SIMULATION_FREQ (10 * 1000000)
-#define PLL_PROF_CLK_FREQ (38400 * 1000)
-#define PLL_DEFAULT_EPP_VALUE 0x80
-
-#define TIM_SAFE_ENABLE 0xf1d0dead
-#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
-
-#define TIMEOUT_US (150 * USEC_PER_MSEC)
-#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
-#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
-#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
-
-#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
-
-#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
- (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
-
-#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
- (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
-
-#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
- (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))
-
-#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
-#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
-
-#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
- (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
- (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
- (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
- (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
- (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
- (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
-
-static void ivpu_hw_wa_init(struct ivpu_device *vdev)
-{
- vdev->wa.punit_disabled = false;
- vdev->wa.clear_runtime_mem = false;
-
- REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
- if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
- /* Writing 1s does not clear the interrupt status register */
- vdev->wa.interrupt_clear_with_0 = true;
- REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
- }
-
- IVPU_PRINT_WA(punit_disabled);
- IVPU_PRINT_WA(clear_runtime_mem);
- IVPU_PRINT_WA(interrupt_clear_with_0);
-}
-
-static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
-{
- vdev->timeout.boot = 1000;
- vdev->timeout.jsm = 500;
- vdev->timeout.tdr = 2000;
- vdev->timeout.reschedule_suspend = 10;
- vdev->timeout.autosuspend = 10;
- vdev->timeout.d0i3_entry_msg = 5;
-}
-
-static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
-{
- return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
-}
-
-/* Send KMD initiated workpoint change */
-static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
- u16 target_ratio, u16 config)
-{
- int ret;
- u32 val;
-
- ret = ivpu_pll_wait_for_cmd_send(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
- return ret;
- }
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
- val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
- val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
- REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
- val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
- val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
- REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
- val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
- REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
- val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
- REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
-
- ret = ivpu_pll_wait_for_cmd_send(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
-{
- u32 exp_val = enable ? 0x1 : 0x0;
-
- if (IVPU_WA(punit_disabled))
- return 0;
-
- return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
-}
-
-static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
-{
- if (IVPU_WA(punit_disabled))
- return 0;
-
- return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
-}
-
-static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
-{
- struct ivpu_hw_info *hw = vdev->hw;
- u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
- u32 fmin_fuse, fmax_fuse;
-
- fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
- fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
- fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
-
- fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
- fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
-
- hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
- hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
- hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
-}
-
-static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
-{
- return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
-}
-
-static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
-{
- struct ivpu_hw_info *hw = vdev->hw;
- u16 target_ratio;
- u16 config;
- int ret;
-
- if (IVPU_WA(punit_disabled)) {
- ivpu_dbg(vdev, PM, "Skipping PLL request\n");
- return 0;
- }
-
- if (enable) {
- target_ratio = hw->pll.pn_ratio;
- config = hw->config;
- } else {
- target_ratio = 0;
- config = 0;
- }
-
- ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
- config, target_ratio);
-
- ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
- if (ret) {
- ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_pll_wait_for_lock(vdev, enable);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for PLL lock\n");
- return ret;
- }
-
- if (enable) {
- ret = ivpu_pll_wait_for_status_ready(vdev);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
- return ret;
- }
-
- ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static int ivpu_pll_enable(struct ivpu_device *vdev)
-{
- return ivpu_pll_drive(vdev, true);
-}
-
-static int ivpu_pll_disable(struct ivpu_device *vdev)
-{
- return ivpu_pll_drive(vdev, false);
-}
-
-static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
-{
- u32 val = 0;
-
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
-
- REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
-}
-
-static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
-
- if (enable) {
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
- } else {
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
- }
-
- REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
-}
-
-static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
-
- if (enable) {
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
- val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
- } else {
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
- }
-
- REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
-}
-
-static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
-
- if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
-
- if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
-
- if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
-
- if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
- !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
-
- if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
- !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
-
- if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
- !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
-{
- ivpu_boot_host_ss_rst_clr_assert(vdev);
-
- return ivpu_boot_noc_qreqn_check(vdev, 0x0);
-}
-
-static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
-{
- REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
-}
-
-static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
- if (enable)
- val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
- else
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
- REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
-
- ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
- if (ret)
- ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
-{
- return ivpu_boot_host_ss_axi_drive(vdev, true);
-}
-
-static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
- if (enable) {
- val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
- val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
- } else {
- val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
- val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
- }
- REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
-
- ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
- if (ret)
- ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
-{
- return ivpu_boot_host_ss_top_noc_drive(vdev, true);
-}
-
-static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
-
- if (enable)
- val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
- else
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
-
- REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
-}
-
-static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
-
- if (enable)
- val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
- else
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
-
- REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
-}
-
-static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
-{
- return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
- exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
-}
-
-static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
-
- if (enable)
- val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
- else
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
-
- REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
-}
-
-static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
-
- if (enable)
- val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
- else
- val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
-
- REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
-}
-
-static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
-{
- int ret;
-
- ivpu_boot_pwr_island_trickle_drive(vdev, true);
- ivpu_boot_pwr_island_drive(vdev, true);
-
- ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for power island status\n");
- return ret;
- }
-
- ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
- return ret;
- }
-
- ivpu_boot_host_ss_clk_drive(vdev, true);
- ivpu_boot_pwr_island_isolation_drive(vdev, false);
- ivpu_boot_host_ss_rst_drive(vdev, true);
- ivpu_boot_dpu_active_drive(vdev, true);
-
- return ret;
-}
-
-static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
-
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
- val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
-
- REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
-}
-
-static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
-{
- u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
-
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
-
- REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
-}
-
-static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
-{
- u32 val;
-
- val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
- val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
-
- val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
- REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
-
- val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
- REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
-
- val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
- REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
-
- val = vdev->fw->entry_point >> 9;
- REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
-
- val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
- REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
-
- ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
- vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
-}
-
-static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
- if (ret) {
- ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
- return ret;
- }
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
- if (enable)
- val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
- else
- val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
- REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
-
- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
- if (ret)
- ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
-{
- struct ivpu_hw_info *hw = vdev->hw;
-
- hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
- hw->sku = TILE_SKU_BOTH;
- hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
-
- ivpu_pll_init_frequency_ratios(vdev);
-
- ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
- ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
-
- vdev->platform = IVPU_PLATFORM_SILICON;
- ivpu_hw_wa_init(vdev);
- ivpu_hw_timeouts_init(vdev);
-
- return 0;
-}
-
-static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
-{
- int ret;
- u32 val;
-
- if (IVPU_WA(punit_disabled))
- return 0;
-
- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
- return ret;
- }
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
- val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
- REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
-
- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
- if (ret)
- ivpu_err(vdev, "Timed out waiting for RESET completion\n");
-
- return ret;
-}
-
-static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
-{
- int ret = 0;
-
- if (ivpu_hw_37xx_ip_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset NPU\n");
- ret = -EIO;
- }
-
- if (ivpu_pll_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable PLL\n");
- ret = -EIO;
- }
-
- return ret;
-}
-
-static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_boot_d0i3_drive(vdev, true);
- if (ret)
- ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
-
- udelay(5); /* VPU requires 5 us to complete the transition */
-
- return ret;
-}
-
-static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_boot_d0i3_drive(vdev, false);
- if (ret)
- ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
-{
- int ret;
-
- /* PLL requests may fail when powering down, so issue WP 0 here */
- ret = ivpu_pll_disable(vdev);
- if (ret)
- ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
-
- ret = ivpu_hw_37xx_d0i3_disable(vdev);
- if (ret)
- ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
-
- ret = ivpu_pll_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_host_ss_configure(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
- return ret;
- }
-
- /*
- * The control circuitry for vpu_idle indication logic powers up active.
- * To ensure unnecessary low power mode signal from LRT during bring up,
- * KMD disables the circuitry prior to bringing up the Main Power island.
- */
- ivpu_boot_vpu_idle_gen_disable(vdev);
-
- ret = ivpu_boot_pwr_domain_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_host_ss_axi_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_host_ss_top_noc_enable(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
-{
- ivpu_boot_no_snoop_enable(vdev);
- ivpu_boot_tbu_mmu_enable(vdev);
- ivpu_boot_soc_cpu_boot(vdev);
-
- return 0;
-}
-
-static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
-{
- u32 val;
-
- if (IVPU_WA(punit_disabled))
- return true;
-
- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
- return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
- REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
-}
-
-static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev)
-{
- return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
-}
-
-static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
-{
- vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
- vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
-}
-
-static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
-{
- int ret = 0;
-
- ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);
-
- if (!ivpu_hw_37xx_is_idle(vdev))
- ivpu_warn(vdev, "NPU not idle during power down\n");
-
- if (ivpu_hw_37xx_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset NPU\n");
- ret = -EIO;
- }
-
- if (ivpu_hw_37xx_d0i3_enable(vdev)) {
- ivpu_err(vdev, "Failed to enter D0I3\n");
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
-{
- u32 val;
-
- /* Enable writing and set non-zero WDT value */
- REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
- REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
-
- /* Enable writing and disable watchdog timer */
- REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
- REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);
-
- /* Now clear the timeout interrupt */
- val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
- val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
- REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
-}
-
-static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev)
-{
- return PLL_PROF_CLK_FREQ;
-}
-
-static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
-{
- /* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
-}
-
-static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
-{
- u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
- u32 cpu_clock;
-
- if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
- cpu_clock = pll_clock * 2 / 4;
- else
- cpu_clock = pll_clock * 2 / 5;
-
- return cpu_clock;
-}
-
-/* Register indirect accesses */
-static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
- pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
-
- if (!ivpu_is_silicon(vdev))
- return PLL_SIMULATION_FREQ;
-
- return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
-}
-
-static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
-{
- return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
-}
-
-static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
-{
- return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
-}
-
-static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
-{
- return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
-}
-
-static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
-{
- u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
- u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);
-
- REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
-}
-
-static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
-{
- return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
-}
-
-static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
-{
- u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
-
- return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
-}
-
-static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
-{
- REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
-}
-
-static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
-{
- REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
-}
-
-static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
-{
- REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
- REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
- REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-}
-
-static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
-{
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
- REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
- REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
-}
-
-static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
-{
- ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
-}
-
-static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
-{
- ivpu_hw_wdt_disable(vdev);
- ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
-}
-
-static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
-{
- ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
-}
-
-/* Handler for IRQs from VPU core (irqV) */
-static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
-{
- u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
-
- if (!status)
- return false;
-
- REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
- ivpu_mmu_irq_evtq_handler(vdev);
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
- ivpu_ipc_irq_handler(vdev, wake_thread);
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
- ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
- ivpu_mmu_irq_gerr_handler(vdev);
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
- ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
- ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
- ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
-
- return true;
-}
-
-/* Handler for IRQs from Buttress core (irqB) */
-static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
-{
- u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
- bool schedule_recovery = false;
-
- if (!status)
- return false;
-
- if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
- REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
-
- if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
- ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
- REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
- u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
-
- ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
- ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
- REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
- REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
- REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- /* This must be done after interrupts are cleared at the source. */
- if (IVPU_WA(interrupt_clear_with_0))
- /*
- * Writing 1 triggers an interrupt, so we can't perform read update write.
- * Clear local interrupt status by writing 0 to all bits.
- */
- REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
- else
- REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
-
- if (schedule_recovery)
- ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
-
- return true;
-}
-
-static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
-{
- struct ivpu_device *vdev = ptr;
- bool irqv_handled, irqb_handled, wake_thread = false;
-
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
- irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread);
- irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq);
-
- /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
- if (wake_thread)
- return IRQ_WAKE_THREAD;
- if (irqv_handled || irqb_handled)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
-static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
-{
- u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
-
- if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
- ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
- ivpu_err(vdev, "WDT MSS timeout detected\n");
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
- ivpu_err(vdev, "WDT NCE timeout detected\n");
-
- if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
- ivpu_err(vdev, "NOC Firewall irq detected\n");
-
- if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
- ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
-
- if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
- u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
-
- ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
- ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
- REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
- REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
- }
-}
-
-const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
- .info_init = ivpu_hw_37xx_info_init,
- .power_up = ivpu_hw_37xx_power_up,
- .is_idle = ivpu_hw_37xx_is_idle,
- .wait_for_idle = ivpu_hw_37xx_wait_for_idle,
- .power_down = ivpu_hw_37xx_power_down,
- .reset = ivpu_hw_37xx_reset,
- .boot_fw = ivpu_hw_37xx_boot_fw,
- .wdt_disable = ivpu_hw_37xx_wdt_disable,
- .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
- .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
- .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
- .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
- .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
- .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
- .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
- .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
- .reg_db_set = ivpu_hw_37xx_reg_db_set,
- .reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
- .reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
- .reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
- .irq_clear = ivpu_hw_37xx_irq_clear,
- .irq_enable = ivpu_hw_37xx_irq_enable,
- .irq_disable = ivpu_hw_37xx_irq_disable,
- .irq_handler = ivpu_hw_37xx_irq_handler,
-};
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
index f6fec1919202..cf5e2f01049c 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
@@ -8,78 +8,6 @@
#include <linux/bits.h>
-#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
-
-#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
-#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
-#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
-#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
-
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
-
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
-
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
-
-#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
-#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
-
-#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
-#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
-
-#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
-#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
-
-#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
-
-#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
-#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
-#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
-
-#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
-#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
-
-#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
-#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
-#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
-
-#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
-#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
-
-#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
-#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
-
-#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
-#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
-#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
-
-#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
-#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
-#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
-
-#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
-#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
-
-#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
-#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
-#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
-
-#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
-#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
-#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
-
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
-
-#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
-
#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
deleted file mode 100644
index b0b88d4c8926..000000000000
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ /dev/null
@@ -1,1250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-
-#include "ivpu_drv.h"
-#include "ivpu_fw.h"
-#include "ivpu_hw.h"
-#include "ivpu_hw_40xx_reg.h"
-#include "ivpu_hw_reg_io.h"
-#include "ivpu_ipc.h"
-#include "ivpu_mmu.h"
-#include "ivpu_pm.h"
-
-#include <linux/dmi.h>
-
-#define TILE_MAX_NUM 6
-#define TILE_MAX_MASK 0x3f
-
-#define LNL_HW_ID 0x4040
-
-#define SKU_TILE_SHIFT 0u
-#define SKU_TILE_MASK 0x0000ffffu
-#define SKU_HW_ID_SHIFT 16u
-#define SKU_HW_ID_MASK 0xffff0000u
-
-#define PLL_CONFIG_DEFAULT 0x0
-#define PLL_CDYN_DEFAULT 0x80
-#define PLL_EPP_DEFAULT 0x80
-#define PLL_REF_CLK_FREQ (50 * 1000000)
-#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
-
-#define PLL_PROFILING_FREQ_DEFAULT 38400000
-#define PLL_PROFILING_FREQ_HIGH 400000000
-
-#define TIM_SAFE_ENABLE 0xf1d0dead
-#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
-
-#define TIMEOUT_US (150 * USEC_PER_MSEC)
-#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
-#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
-#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
-
-#define WEIGHTS_DEFAULT 0xf711f711u
-#define WEIGHTS_ATS_DEFAULT 0x0000f711u
-
-#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
-
-#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
- (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
-
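-/*
- * ICB_1 sits in the upper 32 bits because the ICB clear/enable registers
- * are programmed as a single 64-bit pair (see the REGV_WR64 writes to
- * ICB_CLEAR_0 and ICB_ENABLE_0 below).
- */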
-#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR)))
-
-#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
-#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
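-/*
- * LOCAL_INT_MASK masks a source when its bit is set: the enable value is
- * the complement of the handled-IRQ set and the disable value is all ones.
- */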
-
-#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
- (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
- (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
- (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
- (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
- (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
- (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
-
-static char *ivpu_platform_to_str(u32 platform)
-{
- switch (platform) {
- case IVPU_PLATFORM_SILICON:
- return "SILICON";
- case IVPU_PLATFORM_SIMICS:
- return "SIMICS";
- case IVPU_PLATFORM_FPGA:
- return "FPGA";
- default:
- return "Invalid platform";
- }
-}
-
-static const struct dmi_system_id ivpu_dmi_platform_simulation[] = {
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
- DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
- },
- },
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Simics"),
- },
- },
- { }
-};
-
-static void ivpu_hw_read_platform(struct ivpu_device *vdev)
-{
- if (dmi_check_system(ivpu_dmi_platform_simulation))
- vdev->platform = IVPU_PLATFORM_SIMICS;
- else
- vdev->platform = IVPU_PLATFORM_SILICON;
-
- ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
- ivpu_platform_to_str(vdev->platform), vdev->platform);
-}
-
-static void ivpu_hw_wa_init(struct ivpu_device *vdev)
-{
- vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
- vdev->wa.clear_runtime_mem = false;
-
- if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
- vdev->wa.disable_clock_relinquish = true;
-
- IVPU_PRINT_WA(punit_disabled);
- IVPU_PRINT_WA(clear_runtime_mem);
- IVPU_PRINT_WA(disable_clock_relinquish);
-}
-
-static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
-{
- if (ivpu_is_fpga(vdev)) {
- vdev->timeout.boot = 100000;
- vdev->timeout.jsm = 50000;
- vdev->timeout.tdr = 2000000;
- vdev->timeout.reschedule_suspend = 1000;
- vdev->timeout.autosuspend = -1;
- vdev->timeout.d0i3_entry_msg = 500;
- } else if (ivpu_is_simics(vdev)) {
- vdev->timeout.boot = 50;
- vdev->timeout.jsm = 500;
- vdev->timeout.tdr = 10000;
- vdev->timeout.reschedule_suspend = 10;
- vdev->timeout.autosuspend = -1;
- vdev->timeout.d0i3_entry_msg = 100;
- } else {
- vdev->timeout.boot = 1000;
- vdev->timeout.jsm = 500;
- vdev->timeout.tdr = 2000;
- vdev->timeout.reschedule_suspend = 10;
- vdev->timeout.autosuspend = 10;
- vdev->timeout.d0i3_entry_msg = 5;
- }
-}
-
-static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
-{
- return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
-}
-
-static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
- u16 target_ratio, u16 epp, u16 config, u16 cdyn)
-{
- int ret;
- u32 val;
-
- ret = ivpu_pll_wait_for_cmd_send(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
- return ret;
- }
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);
- val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
- val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
- REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);
- val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
- val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val);
- REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2);
- val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
- val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val);
- REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD);
- val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val);
- REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val);
-
- ret = ivpu_pll_wait_for_cmd_send(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
-{
- return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
-}
-
-static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev)
-{
- if (ivpu_is_simics(vdev))
- return 0;
-
- return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
-}
-
-static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
-{
- struct ivpu_hw_info *hw = vdev->hw;
- u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio;
- u32 fmin_fuse, fmax_fuse;
-
- fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE);
- fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
- fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
-
- fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE);
- fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
-
- hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
- hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
- hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
-}
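-/*
- * Example: fuses giving min = 16, pn = 30, max = 40 with module params
- * ivpu_pll_min_ratio = 20 and ivpu_pll_max_ratio = 60 resolve to
- * min_ratio = 20, max_ratio = 40, pn_ratio = 30; each clamp keeps the
- * value inside the range established by the previous line.
- */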
-
-static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
-{
- u16 config = enable ? PLL_CONFIG_DEFAULT : 0;
- u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0;
- u16 epp = enable ? PLL_EPP_DEFAULT : 0;
- struct ivpu_hw_info *hw = vdev->hw;
- u16 target_ratio = hw->pll.pn_ratio;
- int ret;
-
- ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
- PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);
-
- ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
- target_ratio, epp, config, cdyn);
- if (ret) {
- ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
- return ret;
- }
-
- if (enable) {
- ret = ivpu_pll_wait_for_status_ready(vdev);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static int ivpu_pll_enable(struct ivpu_device *vdev)
-{
- return ivpu_pll_drive(vdev, true);
-}
-
-static int ivpu_pll_disable(struct ivpu_device *vdev)
-{
- return ivpu_pll_drive(vdev, false);
-}
-
-static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
-
- if (enable) {
- val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
- val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
- val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
- } else {
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
- }
-
- REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
-}
-
-static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
-
- if (enable) {
- val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
- val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
- val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
- } else {
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
- }
-
- REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
-}
-
-static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
- !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
- !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
- !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
-
- if (enable)
- val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
- else
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
-
- REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
-}
-
-static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
- if (ret)
- ivpu_err(vdev, "Failed qdeny check %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
- if (enable)
- val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
- else
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
- REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
-
- ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
- return ret;
- }
-
- if (enable) {
- REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
- REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
- }
-
- return ret;
-}
-
-static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
-{
- return ivpu_boot_host_ss_axi_drive(vdev, true);
-}
-
-static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
- if (enable) {
- val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
- val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
- } else {
- val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
- val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
- }
- REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
-
- ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
- if (ret)
- ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
-{
- return ivpu_boot_host_ss_top_noc_drive(vdev, true);
-}
-
-static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
-
- if (enable)
- val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
- else
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
-
- REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
-
- if (enable)
- ndelay(500);
-}
-
-static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
-
- if (enable)
- val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
- else
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
-
- REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
-
- if (!enable)
- ndelay(500);
-}
-
-static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
-{
- if (ivpu_is_fpga(vdev))
- return 0;
-
- return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU,
- exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
-}
-
-static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
-
- if (enable)
- val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
- else
- val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
-
- REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
-}
-
-static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
-
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
-
- REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
-}
-
-static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
-{
- u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
-
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
- val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
-
- REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
-}
-
-static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
- u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
-
- if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
- return -EIO;
-
- return 0;
-}
-
-static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_wait_for_clock_own_resource_ack(vdev);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n");
- return ret;
- }
-
- ivpu_boot_pwr_island_trickle_drive(vdev, true);
- ivpu_boot_pwr_island_drive(vdev, true);
-
- ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for power island status\n");
- return ret;
- }
-
- ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
- return ret;
- }
-
- ivpu_boot_host_ss_clk_drive(vdev, true);
- ivpu_boot_host_ss_rst_drive(vdev, true);
- ivpu_boot_pwr_island_isolation_drive(vdev, false);
-
- return ret;
-}
-
-static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
- if (enable)
- val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
- else
- val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
- REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
-
- ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
- if (ret) {
- ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
- if (ret)
- ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
-{
- return ivpu_boot_soc_cpu_drive(vdev, true);
-}
-
-static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
-{
- int ret;
- u32 val;
- u64 val64;
-
- ret = ivpu_boot_soc_cpu_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
- return ret;
- }
-
- val64 = vdev->fw->entry_point;
- val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
- REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
-
- val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
- val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
- REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
-
- ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
- ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
-
- return 0;
-}
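-/*
- * Note on the shift above: ffs() is 1-based, so "ffs(mask) - 1" moves
- * the FW entry point to the first bit of the IMAGE_LOCATION field (a
- * field starting at bit 3 would give a shift of 3, for example).
- */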
-
-static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
-{
- int ret;
- u32 val;
-
- ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
- if (ret) {
- ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
- return ret;
- }
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL);
- if (enable)
- val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
- else
- val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
- REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val);
-
- ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
- if (ret) {
- ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static bool ivpu_tile_disable_check(u32 config)
-{
- /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
- if (config == 0)
- return true;
-
- if (config > BIT(TILE_MAX_NUM - 1))
- return false;
-
- if ((config & (config - 1)) == 0)
- return true;
-
- return false;
-}
-
-static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
-{
- struct ivpu_hw_info *hw = vdev->hw;
- u32 tile_disable;
- u32 fuse;
-
- fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
- if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) {
- ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
- return -EIO;
- }
-
- tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse);
- if (!ivpu_tile_disable_check(tile_disable)) {
- ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable);
- return -EIO;
- }
-
- if (tile_disable)
- ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
- TILE_MAX_NUM - 1, ffs(tile_disable) - 1);
- else
- ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
-
- hw->tile_fuse = tile_disable;
- hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
-
- ivpu_pll_init_frequency_ratios(vdev);
-
- ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
- ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
- ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
-
- ivpu_hw_read_platform(vdev);
- ivpu_hw_wa_init(vdev);
- ivpu_hw_timeouts_init(vdev);
-
- return 0;
-}
-
-static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
-{
- int ret;
- u32 val;
-
- ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
- if (ret) {
- ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
- return ret;
- }
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET);
- val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val);
- REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val);
-
- ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
- if (ret)
- ivpu_err(vdev, "Timed out waiting for RESET completion\n");
-
- return ret;
-}
-
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
-{
- int ret = 0;
-
- if (ivpu_hw_40xx_ip_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset NPU IP\n");
- ret = -EIO;
- }
-
- if (ivpu_pll_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable PLL\n");
- ret = -EIO;
- }
-
- return ret;
-}
-
-static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
-{
- int ret;
-
- if (IVPU_WA(punit_disabled))
- return 0;
-
- ret = ivpu_boot_d0i3_drive(vdev, true);
- if (ret)
- ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
-
- udelay(5); /* VPU requires 5 us to complete the transition */
-
- return ret;
-}
-
-static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev)
-{
- int ret;
-
- if (IVPU_WA(punit_disabled))
- return 0;
-
- ret = ivpu_boot_d0i3_drive(vdev, false);
- if (ret)
- ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
-
- return ret;
-}
-
-static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev)
-{
- u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
-
- if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
- val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
- else
- val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
-
- REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
-}
-
-static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev)
-{
- ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
- REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enabled" : "Disabled");
-}
-
-static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev)
-{
- u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
-
- val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
- REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
-}
-
-static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_hw_40xx_d0i3_disable(vdev);
- if (ret)
- ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
-
- ret = ivpu_pll_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
- return ret;
- }
-
- if (IVPU_WA(disable_clock_relinquish))
- ivpu_hw_40xx_clock_relinquish_disable(vdev);
- ivpu_hw_40xx_profiling_freq_reg_set(vdev);
- ivpu_hw_40xx_ats_print(vdev);
-
- ret = ivpu_boot_host_ss_check(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
- return ret;
- }
-
- ivpu_boot_idle_gen_drive(vdev, false);
-
- ret = ivpu_boot_pwr_domain_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_host_ss_axi_enable(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
- return ret;
- }
-
- ret = ivpu_boot_host_ss_top_noc_enable(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev)
-{
- int ret;
-
- ivpu_boot_no_snoop_enable(vdev);
- ivpu_boot_tbu_mmu_enable(vdev);
-
- ret = ivpu_boot_soc_cpu_boot(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
-
- return ret;
-}
-
-static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
-{
- u32 val;
-
- if (IVPU_WA(punit_disabled))
- return true;
-
- val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
- return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) &&
- REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
-}
-
-static int ivpu_hw_40xx_wait_for_idle(struct ivpu_device *vdev)
-{
- return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
-}
-
-static void ivpu_hw_40xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
-{
- vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
- vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
-}
-
-static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
-{
- int ret = 0;
-
- ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
-
- if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
- ivpu_warn(vdev, "Failed to reset the NPU\n");
-
- if (ivpu_pll_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable PLL\n");
- ret = -EIO;
- }
-
- if (ivpu_hw_40xx_d0i3_enable(vdev)) {
- ivpu_err(vdev, "Failed to enter D0I3\n");
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
-{
- u32 val;
-
- REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
- REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
-
- REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
- REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
-
- val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
- val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
- REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
-}
-
-static u32 ivpu_hw_40xx_profiling_freq_get(struct ivpu_device *vdev)
-{
- return vdev->hw->pll.profiling_freq;
-}
-
-static void ivpu_hw_40xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
-{
- if (enable)
- vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
- else
- vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
-}
-
-/* Register indirect accesses */
-static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ);
- pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK;
-
- return PLL_RATIO_TO_FREQ(pll_curr_ratio);
-}
-
-static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
-{
- return PLL_RATIO_TO_FREQ(ratio);
-}
-
-static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
-{
- return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
-}
-
-static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev)
-{
- return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE);
-}
-
-static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
-{
- return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
-}
-
-static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
-{
- u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
- u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
-
- REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
-}
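-/*
- * The stride passed to REGV_WR32I implies doorbell N lives at
- * DOORBELL_0 + N * (DOORBELL_1 - DOORBELL_0); with an illustrative 0x10
- * stride, db_id 3 would land at DOORBELL_0 + 0x30.
- */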
-
-static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
-{
- return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
-}
-
-static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
-{
- u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
-
- return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
-}
-
-static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
-{
- REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
-}
-
-static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev)
-{
- REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
-}
-
-static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev)
-{
- REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
- REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
- REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
- REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-}
-
-static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
-{
- REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
- REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
- REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
-}
-
-static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
-{
- /* TODO: For LNL hang consider engine reset instead of full recovery */
- ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
-}
-
-static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
-{
- ivpu_hw_wdt_disable(vdev);
- ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
-}
-
-static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
-{
- ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
-}
-
-/* Handler for IRQs from VPU core (irqV) */
-static bool ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
-{
- u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
-
- if (!status)
- return false;
-
- REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
- ivpu_mmu_irq_evtq_handler(vdev);
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
- ivpu_ipc_irq_handler(vdev, wake_thread);
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
- ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
- ivpu_mmu_irq_gerr_handler(vdev);
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
- ivpu_hw_40xx_irq_wdt_mss_handler(vdev);
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
- ivpu_hw_40xx_irq_wdt_nce_handler(vdev);
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
- ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
-
- return true;
-}
-
-/* Handler for IRQs from Buttress core (irqB) */
-static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
-{
- bool schedule_recovery = false;
- u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
-
- if (!status)
- return false;
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
- ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
- REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
- REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
- REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) {
- ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
- REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) {
- ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
- REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) {
- ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
- REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) {
- ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
- REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1);
- schedule_recovery = true;
- }
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) {
- ivpu_err(vdev, "Survivability error detected\n");
- schedule_recovery = true;
- }
-
- /* This must be done after interrupts are cleared at the source. */
- REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
-
- if (schedule_recovery)
- ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
-
- return true;
-}
-
-static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
-{
- bool irqv_handled, irqb_handled, wake_thread = false;
- struct ivpu_device *vdev = ptr;
-
- REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
- irqv_handled = ivpu_hw_40xx_irqv_handler(vdev, irq, &wake_thread);
- irqb_handled = ivpu_hw_40xx_irqb_handler(vdev, irq);
-
- /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
- REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
- if (wake_thread)
- return IRQ_WAKE_THREAD;
- if (irqv_handled || irqb_handled)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
-static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
-{
- u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
-
- if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev))
- ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
- ivpu_err(vdev, "WDT MSS timeout detected\n");
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
- ivpu_err(vdev, "WDT NCE timeout detected\n");
-
- if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
- ivpu_err(vdev, "NOC Firewall irq detected\n");
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
- ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
- REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
- REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
- }
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
- ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
- ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
- ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
- ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
- REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
-
- if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
- ivpu_err(vdev, "Survivability error detected\n");
-}
-
-const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
- .info_init = ivpu_hw_40xx_info_init,
- .power_up = ivpu_hw_40xx_power_up,
- .is_idle = ivpu_hw_40xx_is_idle,
- .wait_for_idle = ivpu_hw_40xx_wait_for_idle,
- .power_down = ivpu_hw_40xx_power_down,
- .reset = ivpu_hw_40xx_reset,
- .boot_fw = ivpu_hw_40xx_boot_fw,
- .wdt_disable = ivpu_hw_40xx_wdt_disable,
- .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
- .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
- .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
- .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
- .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
- .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
- .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
- .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
- .reg_db_set = ivpu_hw_40xx_reg_db_set,
- .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
- .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
- .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
- .irq_clear = ivpu_hw_40xx_irq_clear,
- .irq_enable = ivpu_hw_40xx_irq_enable,
- .irq_disable = ivpu_hw_40xx_irq_disable,
- .irq_handler = ivpu_hw_40xx_irq_handler,
-};
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index ff4a5d4f5821..fc0ee8d637f9 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -8,91 +8,6 @@
#include <linux/bits.h>
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
-#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
-
-#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
-#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
-
-#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
-
-#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
-#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
-#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
-
-#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
-#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
-
-#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
-
-#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
-#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
-
-#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
-#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
-#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
-
-#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
-
-#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
-#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
-#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
-
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
-
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
-
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
-#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
-
-#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
-#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
-
-#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
-#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
-
-#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
-#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
-#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
-
-#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
-#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
-#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
-#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
-#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
-#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
-#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
-#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
-
-#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
-#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
-
-#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
-#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
-#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
-
-#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
-#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
-#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
-
-#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
-#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
-#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
-
-#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
-#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
-
#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
@@ -198,6 +113,14 @@
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY 0x00030068u
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK GENMASK(7, 0)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST1_DLY_MASK GENMASK(15, 8)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST2_DLY_MASK GENMASK(23, 16)
+
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
+
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
@@ -205,6 +128,9 @@
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+#define VPU_50XX_HOST_SS_AON_FABRIC_REQ_OVERRIDE 0x00030210u
+#define VPU_50XX_HOST_SS_AON_FABRIC_REQ_OVERRIDE_REQ_OVERRIDE_MASK BIT_MASK(0)
+
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
new file mode 100644
index 000000000000..06e65c592618
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -0,0 +1,905 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2025 Intel Corporation
+ */
+
+#include <linux/units.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_btrs.h"
+#include "ivpu_hw_btrs_lnl_reg.h"
+#include "ivpu_hw_btrs_mtl_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_pm.h"
+
+#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))
+
+#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
+ (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
+ (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
+ (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
+ (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))
+
+#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
+ FREQ_CHANGE)))
+
+#define BTRS_IRQ_DISABLE_MASK ((u32)-1)
+
+#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
+
+#define PLL_CDYN_DEFAULT 0x80
+#define PLL_EPP_DEFAULT 0x80
+#define PLL_REF_CLK_FREQ 50000000ull
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
+#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define TIMEOUT_US (150 * USEC_PER_MSEC)
+
+/* Work point configuration values */
+#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
+#define MTL_CONFIG_1_TILE 0x01
+#define MTL_CONFIG_2_TILE 0x02
+#define MTL_PLL_RATIO_5_3 0x01
+#define MTL_PLL_RATIO_4_3 0x02
+#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
+#define BTRS_MTL_TILE_SKU_BOTH 0x3630
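+/* Example: WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3) == (0x02 << 8) | 0x02 == 0x0202 */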
+
+#define BTRS_LNL_TILE_MAX_NUM 6
+#define BTRS_LNL_TILE_MAX_MASK 0x3f
+
+#define WEIGHTS_DEFAULT 0xf711f711u
+#define WEIGHTS_ATS_DEFAULT 0x0000f711u
+
+#define DCT_REQ 0x2
+#define DCT_ENABLE 0x1
+#define DCT_DISABLE 0x0
+
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
+
+int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
+{
+ REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
+ if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
+ /* Writing 1s does not clear the interrupt status register */
+ REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
+ return true;
+ }
+
+ return false;
+}
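+/*
+ * A true result presumably seeds the interrupt_clear_with_0 workaround
+ * that the MTL buttress IRQ handler checks before clearing
+ * INTERRUPT_STAT by writing 0.
+ */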
+
+static void freq_ratios_init_mtl(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 fmin_fuse, fmax_fuse;
+
+ fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
+ hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+ fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
+ hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+}
+
+static void freq_ratios_init_lnl(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 fmin_fuse, fmax_fuse;
+
+ fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
+ hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+ fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
+ hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+}
+
+void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ freq_ratios_init_mtl(vdev);
+ else
+ freq_ratios_init_lnl(vdev);
+
+ hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+ hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+ hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static bool tile_disable_check(u32 config)
+{
+ /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
+ if (config == 0)
+ return true;
+
+ if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
+ return false;
+
+ if ((config & (config - 1)) == 0)
+ return true;
+
+ return false;
+}
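+/*
+ * Examples: 0x00 (no tile disabled) and single-bit masks such as 0x08
+ * (only tile 3 disabled) pass; 0x03 (two tiles disabled) and 0x40 (bit
+ * out of range) fail the checks above.
+ */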
+
+static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
+{
+ u32 fuse;
+ u32 config;
+
+ fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
+ if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
+ ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
+ return -EIO;
+ }
+
+ config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
+ if (!tile_disable_check(config))
+ ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);
+
+ ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);
+
+ *tile_fuse_config = config;
+ return 0;
+}
+
+static int info_init_mtl(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
+ hw->sku = BTRS_MTL_TILE_SKU_BOTH;
+ hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);
+
+ return 0;
+}
+
+static int info_init_lnl(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 tile_fuse_config;
+ int ret;
+
+ ret = read_tile_config_fuse(vdev, &tile_fuse_config);
+ if (ret)
+ return ret;
+
+ hw->tile_fuse = tile_fuse_config;
+ hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+
+ return 0;
+}
+
+int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return info_init_mtl(vdev);
+ else
+ return info_init_lnl(vdev);
+}
+
+static int wp_request_sync(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+ else
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
+static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
+{
+ u32 exp_val = enable ? 0x1 : 0x0;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
+ else
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
+}
+
+struct wp_request {
+ u16 min;
+ u16 max;
+ u16 target;
+ u16 cfg;
+ u16 epp;
+ u16 cdyn;
+};
+
+static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
+{
+ u32 val;
+
+ val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
+ REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);
+
+ val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
+ REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);
+
+ val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
+ REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);
+
+ val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
+ val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
+ REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
+}
+
+static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
+{
+ u32 val;
+
+ val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);
+
+ val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);
+
+ val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);
+
+ val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
+ val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
+}
+
+static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ wp_request_mtl(vdev, wp);
+ else
+ wp_request_lnl(vdev, wp);
+}
+
+static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
+{
+ int ret;
+
+ ret = wp_request_sync(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
+ return ret;
+ }
+
+ wp_request(vdev, wp);
+
+ ret = wp_request_sync(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);
+
+ return ret;
+}
+
+static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ wp->min = hw->pll.min_ratio;
+ wp->max = hw->pll.max_ratio;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
+ wp->target = enable ? hw->pll.pn_ratio : 0;
+ wp->cfg = enable ? hw->config : 0;
+ wp->cdyn = 0;
+ wp->epp = 0;
+ } else {
+ wp->target = hw->pll.pn_ratio;
+ wp->cfg = 0;
+ wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
+ wp->epp = enable ? PLL_EPP_DEFAULT : 0;
+ }
+}
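+/*
+ * Example (MTL, enable): min/max come from the fused ratio range, target
+ * is the PN ratio and cfg encodes tile count and PLL ratio via
+ * WP_CONFIG(); on disable MTL zeroes target/cfg, while LNL keeps the
+ * target and zeroes cdyn/epp instead.
+ */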
+
+static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
+{
+ u32 exp_val = enable ? 0x1 : 0x0;
+
+ if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
+ return 0;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+}
+
+static int wait_for_cdyn_deassert(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return 0;
+
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US);
+}
+
+int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
+{
+ struct wp_request wp;
+ int ret;
+
+ if (IVPU_WA(punit_disabled)) {
+ ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
+ return 0;
+ }
+
+ prepare_wp_request(vdev, &wp, enable);
+
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
+ pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);
+
+ ret = wp_request_send(vdev, &wp);
+ if (ret) {
+ ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_pll_lock(vdev, enable);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL lock\n");
+ return ret;
+ }
+
+ ret = wait_for_status_ready(vdev, enable);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
+ return ret;
+ }
+
+ if (!enable) {
+ ret = wait_for_cdyn_deassert(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for CDYN deassert\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
+ if (enable)
+ val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
+ else
+ val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
+ REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+
+ return ret;
+}
+
+static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
+ if (enable)
+ val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
+ else
+ val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return d0i3_drive_mtl(vdev, enable);
+ else
+ return d0i3_drive_lnl(vdev, enable);
+}
+
+int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = d0i3_drive(vdev, true);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+ udelay(5); /* VPU requires 5 us to complete the transition */
+
+ return ret;
+}
+
+int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = d0i3_drive(vdev, false);
+ if (ret)
+ ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return 0;
+
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
+}
+
+void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
+{
+ REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
+ REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
+}
+
+static int ip_reset_mtl(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
+static int ip_reset_lnl(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
+ val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
+int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
+{
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return ip_reset_mtl(vdev);
+ else
+ return ip_reset_lnl(vdev);
+}
+
+void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
+{
+ u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
+ val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
+ else
+ val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
+
+ REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
+}
+
+void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
+ REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enabled" : "Disabled");
+}
+
+void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
+{
+ u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
+ REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
+}
+
+bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return true;
+
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
+ val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);
+
+ return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
+ } else {
+ val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
+ }
+}
+
+int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+ else
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+}
+
+static u32 pll_config_get_mtl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
+}
+
+static u32 pll_config_get_lnl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
+}
+
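+/* The DPU clock is derived from the PLL output: 2/3 of it on MTL and 1/2 on LNL. */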
+static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
+{
+ return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
+}
+
+static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio) / 2;
+}
+
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(ratio);
+ else
+ return pll_ratio_to_dpu_freq_lnl(ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
+{
+ return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
+ else
+ return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
+ bool schedule_recovery = false;
+
+ if (!status)
+ return false;
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_mtl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
+ REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
+ u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);
+
+ ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+ ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
+ REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ /* This must be done after interrupts are cleared at the source. */
+ if (IVPU_WA(interrupt_clear_with_0))
+ /*
+ * Writing 1 triggers an interrupt, so we can't perform read update write.
+ * Clear local interrupt status by writing 0 to all bits.
+ */
+ REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
+ else
+ REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);
+
+ if (schedule_recovery)
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
+
+ return true;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
+ bool schedule_recovery = false;
+
+ if (!status)
+ return false;
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
+ ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
+ queue_work(system_percpu_wq, &vdev->irq_dct_work);
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_lnl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+ REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
+ REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
+ REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
+ ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
+ REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
+ ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
+ REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
+ ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
+ REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
+ ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
+ REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ /* This must be done after interrupts are cleared at the source. */
+ REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);
+
+ if (schedule_recovery)
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
+
+ return true;
+}
+
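+/* Read the DCT (duty cycle throttling) request that PCODE posted in the mailbox shadow register. */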
+int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
+{
+ u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
+ u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
+ u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);
+
+ if (cmd != DCT_REQ) {
+ ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
+ return -EBADR;
+ }
+
+ switch (param1) {
+ case DCT_ENABLE:
+ *enable = true;
+ return 0;
+ case DCT_DISABLE:
+ *enable = false;
+ return 0;
+ default:
+ ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
+ return -EINVAL;
+ }
+}
+
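+/* Report the resulting DCT state back to PCODE through the mailbox status register. */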
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent)
+{
+ u32 val = 0;
+ u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
+
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);
+
+ REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
+}
+
+u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
+ else
+ return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
+}
+
+u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
+ else
+ return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
+}
+
+u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
+ else
+ return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
+}
+
+void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
+ else
+ REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
+}
+
+void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
+ else
+ REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
+}
+
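+/* Masks are active-high: a set bit in LOCAL_INT_MASK masks that IRQ; GLOBAL_INT_MASK gates them all. */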
+void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
+ REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
+ REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
+ } else {
+ REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
+ REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
+ }
+}
+
+void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
+ REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
+ } else {
+ REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
+ }
+}
+
+static void diagnose_failure_mtl(struct ivpu_device *vdev)
+{
+ u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
+ u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);
+
+ ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+ log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
+ REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
+ REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
+ }
+}
+
+static void diagnose_failure_lnl(struct ivpu_device *vdev)
+{
+ u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
+ ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+ REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
+ REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
+ }
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
+ ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
+ ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
+ ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
+ ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
+ REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
+
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
+ ivpu_err(vdev, "Survivability IRQ\n");
+}
+
+void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return diagnose_failure_mtl(vdev);
+ else
+ return diagnose_failure_lnl(vdev);
+}
+
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
+{
+ u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
+}
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
new file mode 100644
index 000000000000..c4c10e22f30f
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2025 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_BTRS_H__
+#define __IVPU_HW_BTRS_H__
+
+#include "ivpu_drv.h"
+#include "ivpu_hw_37xx_reg.h"
+#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_reg_io.h"
+
+#define PLL_PROFILING_FREQ_DEFAULT 38400000
+#define PLL_PROFILING_FREQ_HIGH 400000000
+
+#define DCT_DEFAULT_ACTIVE_PERCENT 30u
+#define DCT_PERIOD_US 35300u
+
+int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
+void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev);
+int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev);
+int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable);
+int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev);
+int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev);
+int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev);
+void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev);
+bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev);
+int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev);
+int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
+void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
+void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
+void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
+bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
+bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
+int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent);
+u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
+void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev);
+void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev);
+void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev);
+void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev);
+void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev);
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev);
+
+#endif /* __IVPU_HW_BTRS_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
new file mode 100644
index 000000000000..a81a9ba540fa
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_BTRS_LNL_REG_H__
+#define __IVPU_HW_BTRS_LNL_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT 0x00000000u
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
+#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
+
+#define VPU_HW_BTRS_LNL_LOCAL_INT_MASK 0x00000004u
+#define VPU_HW_BTRS_LNL_GLOBAL_INT_MASK 0x00000008u
+
+#define VPU_HW_BTRS_LNL_HM_ATS 0x0000000cu
+
+#define VPU_HW_BTRS_LNL_ATS_ERR_LOG1 0x00000010u
+#define VPU_HW_BTRS_LNL_ATS_ERR_LOG2 0x00000014u
+#define VPU_HW_BTRS_LNL_ATS_ERR_CLEAR 0x00000018u
+
+#define VPU_HW_BTRS_LNL_CFI0_ERR_LOG 0x0000001cu
+#define VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR 0x00000020u
+
+#define VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
+
+#define VPU_HW_BTRS_LNL_CFI1_ERR_LOG 0x00000040u
+#define VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR 0x00000044u
+
+#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW 0x00000048u
+#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH 0x0000004cu
+#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR 0x00000050u
+
+#define VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS 0x00000054u
+
+#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW 0x00000058u
+#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH 0x0000005cu
+#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR 0x00000060u
+
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS 0x00000070u
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_CMD_MASK GENMASK(7, 0)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM1_MASK GENMASK(15, 8)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM2_MASK GENMASK(23, 16)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM3_MASK GENMASK(31, 24)
+
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW 0x00000074u
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_CMD_MASK GENMASK(7, 0)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM1_MASK GENMASK(15, 8)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM2_MASK GENMASK(23, 16)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM3_MASK GENMASK(31, 24)
+
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0 0x00000130u
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1 0x00000134u
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2 0x00000138u
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
+
+#define VPU_HW_BTRS_LNL_WP_REQ_CMD 0x0000013cu
+#define VPU_HW_BTRS_LNL_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u
+#define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_HW_BTRS_LNL_CDYN 0x0000014cu
+#define VPU_HW_BTRS_LNL_CDYN_CDYN_MASK GENMASK(15, 0)
+
+#define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u
+#define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
+
+#define VPU_HW_BTRS_LNL_VPU_STATUS 0x00000154u
+#define VPU_HW_BTRS_LNL_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_PLATFORM_MASK GENMASK(31, 29)
+
+#define VPU_HW_BTRS_LNL_IP_RESET 0x00000160u
+#define VPU_HW_BTRS_LNL_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define VPU_HW_BTRS_LNL_D0I3_CONTROL 0x00000164u
+#define VPU_HW_BTRS_LNL_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_LNL_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET 0x00000168u
+#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE 0x0000016cu
+#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE 0x00000170u
+
+#define VPU_HW_BTRS_LNL_FMIN_FUSE 0x00000174u
+#define VPU_HW_BTRS_LNL_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define VPU_HW_BTRS_LNL_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define VPU_HW_BTRS_LNL_FMAX_FUSE 0x00000178u
+#define VPU_HW_BTRS_LNL_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#endif /* __IVPU_HW_BTRS_LNL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_mtl_reg.h
new file mode 100644
index 000000000000..e93d539e066f
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_mtl_reg.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_BTRS_MTL_REG_H__
+#define __IVPU_HW_BTRS_MTL_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_HW_BTRS_MTL_INTERRUPT_TYPE 0x00000000u
+
+#define VPU_HW_BTRS_MTL_INTERRUPT_STAT 0x00000004u
+#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
+
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0 0x00000008u
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1 0x0000000cu
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2 0x00000010u
+#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+
+#define VPU_HW_BTRS_MTL_WP_REQ_CMD 0x00000014u
+#define VPU_HW_BTRS_MTL_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define VPU_HW_BTRS_MTL_WP_DOWNLOAD 0x00000018u
+#define VPU_HW_BTRS_MTL_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_HW_BTRS_MTL_CURRENT_PLL 0x0000001cu
+#define VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_HW_BTRS_MTL_PLL_ENABLE 0x00000020u
+
+#define VPU_HW_BTRS_MTL_FMIN_FUSE 0x00000024u
+#define VPU_HW_BTRS_MTL_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define VPU_HW_BTRS_MTL_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define VPU_HW_BTRS_MTL_FMAX_FUSE 0x00000028u
+#define VPU_HW_BTRS_MTL_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define VPU_HW_BTRS_MTL_TILE_FUSE 0x0000002cu
+#define VPU_HW_BTRS_MTL_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_MTL_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+
+#define VPU_HW_BTRS_MTL_LOCAL_INT_MASK 0x00000030u
+#define VPU_HW_BTRS_MTL_GLOBAL_INT_MASK 0x00000034u
+
+#define VPU_HW_BTRS_MTL_PLL_STATUS 0x00000040u
+#define VPU_HW_BTRS_MTL_PLL_STATUS_LOCK_MASK BIT_MASK(1)
+
+#define VPU_HW_BTRS_MTL_VPU_STATUS 0x00000044u
+#define VPU_HW_BTRS_MTL_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_MTL_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+
+#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL 0x00000060u
+#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define VPU_HW_BTRS_MTL_VPU_IP_RESET 0x00000050u
+#define VPU_HW_BTRS_MTL_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET 0x00000080u
+#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE 0x00000084u
+#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE 0x00000088u
+
+#define VPU_HW_BTRS_MTL_ATS_ERR_LOG_0 0x000000a0u
+#define VPU_HW_BTRS_MTL_ATS_ERR_LOG_1 0x000000a4u
+#define VPU_HW_BTRS_MTL_ATS_ERR_CLEAR 0x000000a8u
+
+#define VPU_HW_BTRS_MTL_UFI_ERR_LOG 0x000000b0u
+#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
+#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
+#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+
+#define VPU_HW_BTRS_MTL_UFI_ERR_CLEAR 0x000000b4u
+
+#endif /* __IVPU_HW_BTRS_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
new file mode 100644
index 000000000000..06aa1e7dc50b
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_37xx_reg.h"
+#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_btrs.h"
+#include "ivpu_hw_ip.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+
+#define TIM_SAFE_ENABLE 0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
+
+#define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)
+
+#define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)
+
+#define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+#define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static int wait_for_ip_bar(struct ivpu_device *vdev)
+{
+ return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+}
+
+static void host_ss_rst_clr(struct ivpu_device *vdev)
+{
+ u32 val = 0;
+
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
+}
+
+static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return host_ss_noc_qreqn_check_37xx(vdev, exp_val);
+ else
+ return host_ss_noc_qreqn_check_40xx(vdev, exp_val);
+}
+
+static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return host_ss_noc_qacceptn_check_37xx(vdev, exp_val);
+ else
+ return host_ss_noc_qacceptn_check_40xx(vdev, exp_val);
+}
+
+static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return host_ss_noc_qdeny_check_37xx(vdev, exp_val);
+ else
+ return host_ss_noc_qdeny_check_40xx(vdev, exp_val);
+}
+
+static int top_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int top_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return top_noc_qreqn_check_37xx(vdev, exp_val);
+ else
+ return top_noc_qreqn_check_40xx(vdev, exp_val);
+}
+
+int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ ret = wait_for_ip_bar(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
+ return ret;
+ }
+ host_ss_rst_clr(vdev);
+ }
+
+ ret = host_ss_noc_qreqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = host_ss_noc_qacceptn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = host_ss_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check %d\n", ret);
+
+ return ret;
+}
+
+static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
+ else
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
+
+ REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val);
+}
+
+static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
+}
+
+void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ idle_gen_drive_37xx(vdev, true);
+ else
+ idle_gen_drive_40xx(vdev, true);
+}
+
+void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ idle_gen_drive_37xx(vdev, false);
+ else
+ idle_gen_drive_40xx(vdev, false);
+}
+
+static void
+pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
+{
+ u32 val;
+
+ val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
+ REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);
+
+ val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
+ REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
+}
+
+static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+}
+
+static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+}
+
+static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
+}
+
+static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
+}
+
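+/* Enable the trickle supply first, then give the island ~500 ns to ramp before the full power enable. */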
+static void pwr_island_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ pwr_island_trickle_drive_37xx(vdev, true);
+ ndelay(500);
+ pwr_island_drive_37xx(vdev, true);
+ } else {
+ pwr_island_trickle_drive_40xx(vdev, true);
+ ndelay(500);
+ pwr_island_drive_40xx(vdev, true);
+ }
+}
+
+static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
+ PWR_ISLAND_STATUS_TIMEOUT_US);
+ else
+ return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
+ PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ pwr_island_isolation_drive_37xx(vdev, enable);
+ else
+ pwr_island_isolation_drive_40xx(vdev, enable);
+}
+
+static void pwr_island_isolation_disable(struct ivpu_device *vdev)
+{
+ pwr_island_isolation_drive(vdev, false);
+}
+
+static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
+}
+
+static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
+}
+
+static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ host_ss_clk_drive_37xx(vdev, enable);
+ else
+ host_ss_clk_drive_40xx(vdev, enable);
+}
+
+static void host_ss_clk_enable(struct ivpu_device *vdev)
+{
+ host_ss_clk_drive(vdev, true);
+}
+
+static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
+}
+
+static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
+}
+
+static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ host_ss_rst_drive_37xx(vdev, enable);
+ else
+ host_ss_rst_drive_40xx(vdev, enable);
+}
+
+static void host_ss_rst_enable(struct ivpu_device *vdev)
+{
+ host_ss_rst_drive(vdev, true);
+}
+
+static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
+}
+
+static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
+}
+
+static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable);
+ else
+ host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable);
+}
+
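+/* NOC Q-channel handshake: drive QREQN and expect QACCEPTN to follow while QDENY stays deasserted. */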
+static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+
+ host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);
+
+ ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
+ return ret;
+ }
+
+ ret = host_ss_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);
+
+ return ret;
+}
+
+static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ }
+
+ REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
+}
+
+static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ } else {
+ val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ }
+
+ REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
+}
+
+static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ top_noc_qreqn_drive_37xx(vdev, enable);
+ else
+ top_noc_qreqn_drive_40xx(vdev, enable);
+}
+
+int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+ return host_ss_axi_drive(vdev, true);
+}
+
+static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return top_noc_qacceptn_check_37xx(vdev, exp_val);
+ else
+ return top_noc_qacceptn_check_40xx(vdev, exp_val);
+}
+
+static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return top_noc_qdeny_check_37xx(vdev, exp_val);
+ else
+ return top_noc_qdeny_check_40xx(vdev, exp_val);
+}
+
+static int top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+
+ top_noc_qreqn_drive(vdev, enable);
+
+ ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
+ return ret;
+ }
+
+ ret = top_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
+{
+ return top_noc_drive(vdev, true);
+}
+
+static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ else
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+
+ REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
+}
+
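+/* Power island ramp/status delays apply to 50XX+ only and vary per SKU and profiling frequency. */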
+static void pwr_island_delay_set(struct ivpu_device *vdev)
+{
+ bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
+ u32 post, post1, post2, status;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return;
+
+ switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_WCL:
+ case PCI_DEVICE_ID_PTL_P:
+ post = high ? 18 : 0;
+ post1 = 0;
+ post2 = 0;
+ status = high ? 46 : 3;
+ break;
+
+ case PCI_DEVICE_ID_NVL:
+ post = high ? 198 : 17;
+ post1 = 0;
+ post2 = high ? 198 : 17;
+ status = 0;
+ break;
+
+ default:
+ dump_stack();
+ ivpu_err(vdev, "Unknown device ID\n");
+ return;
+ }
+
+ pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
+}
+
+int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ pwr_island_delay_set(vdev);
+ pwr_island_enable(vdev);
+
+ ret = wait_for_pwr_island_status(vdev, 0x1);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for power island status\n");
+ return ret;
+ }
+
+ ret = top_noc_qreqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret);
+ return ret;
+ }
+
+ host_ss_clk_enable(vdev);
+ pwr_island_isolation_disable(vdev);
+ host_ss_rst_enable(vdev);
+
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ dpu_active_drive_37xx(vdev, true);
+
+ return ret;
+}
+
+u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
+ else
+ return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
+}
+
+static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+
+ if (ivpu_is_force_snoop_enabled(vdev))
+ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+ else
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+
+ if (ivpu_is_force_snoop_enabled(vdev))
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return ivpu_hw_ip_snoop_disable_37xx(vdev);
+ else
+ return ivpu_hw_ip_snoop_disable_40xx(vdev);
+}
+
+static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
+
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+
+ REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
+ else
+ return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
+}
+
+static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
+ val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
+
+ val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
+ REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+ REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+ REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
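+ /* The loading address register takes a 512-byte-aligned entry point; its low bits hold flags such as DONE. */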
+ val = vdev->fw->entry_point >> 9;
+ REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
+
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+ REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
+
+ ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+ vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
+
+ return 0;
+}
+
+static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+ REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
+}
+
+static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+
+ cpu_noc_top_mmio_drive_40xx(vdev, enable);
+
+ ret = cpu_noc_qacceptn_check_40xx(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int soc_cpu_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX)
+ return 0;
+
+ return soc_cpu_drive_40xx(vdev, true);
+}
+
+static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+ u64 val64;
+
+ ret = soc_cpu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
+ return ret;
+ }
+
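+ /* Place the entry point in the IMAGE_LOCATION field, then latch it by setting the DONE bit. */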
+ val64 = vdev->fw->entry_point;
+ val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
+ REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
+
+ val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
+ REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
+
+ ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+ ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
+
+ return 0;
+}
+
+int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return soc_cpu_boot_37xx(vdev);
+ else
+ return soc_cpu_boot_40xx(vdev);
+}
+
+static void wdt_disable_37xx(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ /* Enable writing and set non-zero WDT value */
+ REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+ /* Enable writing and disable watchdog timer */
+ REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);
+
+ /* Now clear the timeout interrupt */
+ val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
+ val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+ REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+static void wdt_disable_40xx(struct ivpu_device *vdev)
+{
+ u32 val;
+
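+ /* Same sequence as wdt_disable_37xx: enable writes, reset and disable the WDT, then clear the timeout interrupt. */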
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
+
+ val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
+ val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return wdt_disable_37xx(vdev);
+ else
+ return wdt_disable_40xx(vdev);
+}
+
+static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
+{
+ u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
+
+ return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
+{
+ u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+
+ return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return ipc_rx_count_get_37xx(vdev);
+ else
+ return ipc_rx_count_get_40xx(vdev);
+}
+
+void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
+ } else {
+ REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
+ }
+}
+
+void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+ } else {
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
+ }
+}
+
+static void diagnose_failure_37xx(struct ivpu_device *vdev)
+{
+ u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
+
+ if (ipc_rx_count_get_37xx(vdev))
+ ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
+ ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
+ ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
+ ivpu_err(vdev, "NOC Firewall irq detected\n");
+}
+
+static void diagnose_failure_40xx(struct ivpu_device *vdev)
+{
+ u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
+
+ if (ipc_rx_count_get_40xx(vdev))
+ ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
+ ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
+ ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
+ ivpu_err(vdev, "NOC Firewall irq detected\n");
+}
+
+void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ diagnose_failure_37xx(vdev);
+ else
+ diagnose_failure_40xx(vdev);
+}
+
+void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
+ else
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
+}
+
+static void irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
+}
+
+static void irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+ ivpu_hw_ip_wdt_disable(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
+}
+
+static void irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+ atomic_inc(&vdev->hw->firewall_irq_counter);
+
+ ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
+ atomic_read(&vdev->hw->firewall_irq_counter));
+}
+
+/* Handler for IRQs from NPU core */
+bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
+
+ if (!status)
+ return false;
+
+ REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ ivpu_mmu_irq_evtq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ ivpu_ipc_irq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ ivpu_mmu_irq_gerr_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ irq_wdt_mss_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ irq_wdt_nce_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ irq_noc_firewall_handler(vdev);
+
+ return true;
+}
+
+/* Handler for IRQs from NPU core */
+bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
+
+ if (!status)
+ return false;
+
+ REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ ivpu_mmu_irq_evtq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ ivpu_ipc_irq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ ivpu_mmu_irq_gerr_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ irq_wdt_mss_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ irq_wdt_nce_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ irq_noc_firewall_handler(vdev);
+
+ return true;
+}
+
+static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
+{
+ u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
+ u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);
+
+ REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
+{
+ u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
+ u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
+
+ REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
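/*
 * Editor's note: a minimal sketch, not part of the patch, of what the
 * stride-indexed doorbell write above expands to, assuming REGV_WR32I
 * targets base + db_id * reg_stride:
 */
static inline void db_set_sketch_40xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
	u32 offset = VPU_40XX_CPU_SS_DOORBELL_0 + db_id * reg_stride;

	/* Same effect as REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val) */
	writel(REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET), vdev->regv + offset);
}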
+
+void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ db_set_37xx(vdev, db_id);
+ else
+ db_set_40xx(vdev, db_id);
+}
+
+u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
+ else
+ return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
+ REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+ else
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.h b/drivers/accel/ivpu/ivpu_hw_ip.h
new file mode 100644
index 000000000000..5b1b391aa577
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_ip.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_IP_H__
+#define __IVPU_HW_IP_H__
+
+#include "ivpu_drv.h"
+
+int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev);
+void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev);
+void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev);
+int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev);
+int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev);
+int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev);
+u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev);
+void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev);
+void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev);
+int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev);
+void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev);
+void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev);
+u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev);
+void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev);
+bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq);
+bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq);
+void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id);
+u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev);
+void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr);
+void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev);
+void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev);
+void ivpu_hw_ip_fabric_req_override_enable_50xx(struct ivpu_device *vdev);
+void ivpu_hw_ip_fabric_req_override_disable_50xx(struct ivpu_device *vdev);
+
+#endif /* __IVPU_HW_IP_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h
index 79b3f441eac4..66259b0ead02 100644
--- a/drivers/accel/ivpu/ivpu_hw_reg_io.h
+++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h
@@ -7,6 +7,7 @@
#define __IVPU_HW_REG_IO_H__
#include <linux/bitfield.h>
+#include <linux/fault-inject.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -16,13 +17,11 @@
#define REG_IO_ERROR 0xffffffff
#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__)
-#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg))
#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__)
#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__)
-#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg))
#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__)
#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__)
#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__)
@@ -47,31 +46,42 @@
#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
((num) == FIELD_GET(REG##_##FLD##_MASK, val))
-#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGB_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
-
-#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGV_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
+#define REGB_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regb, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+#define REGV_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regv, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+extern struct fault_attr ivpu_hw_failure;
+
+static inline int __must_check
+ivpu_hw_reg_poll_fld(struct ivpu_device *vdev, void __iomem *base,
+ u32 reg_offset, u32 reg_mask, u32 exp_masked_val, u32 timeout_us,
+ const char *func_name, const char *reg_name, const char *fld_name)
+{
+ u32 reg_val;
+ int ret;
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s started (exp_val 0x%x)\n",
+ func_name, reg_name, reg_offset, fld_name, exp_masked_val);
+
+ ret = read_poll_timeout(readl, reg_val, (reg_val & reg_mask) == exp_masked_val,
+ REG_POLL_SLEEP_US, timeout_us, false, base + reg_offset);
+
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&ivpu_hw_failure, 1))
+ ret = -ETIMEDOUT;
+#endif
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s %s (reg_val 0x%08x)\n",
+ func_name, reg_name, reg_offset, fld_name, ret ? "ETIMEDOUT" : "OK", reg_val);
+
+ return ret;
+}
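/*
 * Editor's note: a hedged usage sketch, not part of the patch. With the
 * rework above a poll site needs no local variables, e.g. (register and
 * field names are illustrative only):
 *
 *	ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1,
 *			    PLL_TIMEOUT_US);
 *	if (ret)
 *		return ret;
 *
 * The macro expands to ivpu_hw_reg_poll_fld() with the field mask and the
 * FIELD_PREP()-shifted expected value, so the comparison is done on the
 * masked register value rather than on an extracted field, and the fault
 * injection hook can force -ETIMEDOUT on any poll site for testing.
 */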
static inline u32
ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 56ff067f63e2..1f13bf95b2b3 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -15,6 +15,7 @@
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
#define IPC_MAX_RX_MSG 128
@@ -129,7 +130,7 @@ static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
- ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
+ ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}
static void
@@ -140,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
lockdep_assert_held(&ipc->cons_lock);
- lockdep_assert_irqs_disabled();
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
@@ -210,8 +210,7 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
-static int
-ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
+int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret;
@@ -228,6 +227,7 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v
goto unlock;
ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
+ trace_jsm("[tx]", req);
unlock:
mutex_unlock(&ipc->lock);
@@ -279,12 +279,13 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
- ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
+ ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
ret = -EBADMSG;
}
if (jsm_msg)
memcpy(jsm_msg, rx_msg->jsm_msg, size);
+ trace_jsm("[rx]", rx_msg->jsm_msg);
}
ivpu_ipc_rx_msg_del(vdev, rx_msg);
@@ -292,15 +293,17 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
return ret;
}
-static int
+int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms)
+ struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
{
struct ivpu_ipc_consumer cons;
int ret;
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
+ pm_runtime_enabled(vdev->drm.dev));
+
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
ret = ivpu_ipc_send(vdev, &cons, req);
@@ -326,19 +329,21 @@ consumer_del:
return ret;
}
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
{
struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg hb_resp;
int ret, hb_ret;
- drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
if (ret != -ETIMEDOUT)
- return ret;
+ goto rpm_put;
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
@@ -346,21 +351,33 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
if (hb_ret == -ETIMEDOUT)
ivpu_pm_trigger_recovery(vdev, "IPC timeout");
+rpm_put:
+ ivpu_rpm_put(vdev);
return ret;
}
-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ u32 channel, unsigned long timeout_ms)
{
+ struct ivpu_ipc_consumer cons;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
- ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
+ ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
+ ret = ivpu_ipc_send(vdev, &cons, req);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
+ goto consumer_del;
+ }
+
+ msleep(timeout_ms);
+
+consumer_del:
+ ivpu_ipc_consumer_del(vdev, &cons);
ivpu_rpm_put(vdev);
return ret;
}
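/*
 * Editor's note: an illustrative caller sketch, not from the patch. With the
 * rework above, ivpu_ipc_send_receive() takes its own runtime PM reference,
 * so a typical JSM helper reduces to:
 *
 *	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
 *	struct vpu_jsm_msg resp;
 *	int ret;
 *
 *	req.payload.query_engine_hb.engine_idx = engine;
 *	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
 *				    &resp, VPU_IPC_CHAN_ASYNC_CMD,
 *				    vdev->timeout.jsm);
 *
 * ivpu_ipc_send_and_wait() covers the fire-and-forget case where no response
 * type is expected and the caller only needs to give the FW time to act.
 */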
@@ -378,7 +395,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons
return false;
}
-void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons;
@@ -392,8 +409,8 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
* The driver needs to purge all messages from the IPC FIFO to clear the IPC
* interrupt; until the FIFO is drained to zero, no further IPC interrupts
* will be generated.
*/
- while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
- vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
+ while (ivpu_hw_ipc_rx_count_get(vdev)) {
+ vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
if (vpu_addr == REG_IO_ERROR) {
ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
return;
@@ -442,12 +459,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
}
}
- if (wake_thread)
- *wake_thread = !list_empty(&ipc->cb_msg_list);
+ queue_work(system_percpu_wq, &vdev->irq_ipc_work);
}
-irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
struct list_head cb_msg_list;
@@ -462,8 +479,6 @@ irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
ivpu_ipc_rx_msg_del(vdev, rx_msg);
}
-
- return IRQ_HANDLED;
}
int ivpu_ipc_init(struct ivpu_device *vdev)
@@ -520,7 +535,6 @@ void ivpu_ipc_fini(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
- drm_WARN_ON(&vdev->drm, ipc->on);
drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index 40ca3cc4e61f..b524a1985b9d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_IPC_H__
@@ -89,22 +89,25 @@ void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
-void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
-irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_work_fn(struct work_struct *work);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
+int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct vpu_jsm_msg *req);
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
unsigned long timeout_ms);
-
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ u32 channel, unsigned long timeout_ms);
#endif /* __IVPU_IPC_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index a49bc9105ed0..4f8564e2878a 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <drm/drm_file.h>
@@ -8,158 +8,381 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
+#include "vpu_boot_api.h"
#define CMD_BUF_IDX 0
-#define JOB_ID_JOB_MASK GENMASK(7, 0)
-#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
- ivpu_hw_reg_db_set(vdev, cmdq->db_id);
+ ivpu_hw_db_set(vdev, cmdq->db_id);
}
-static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
+static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
+ struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ if (ivpu_fw_preempt_buf_size(vdev) == 0)
+ return 0;
+
+ cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
+ vdev->fw->primary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
+ if (!cmdq->primary_preempt_buf) {
+ ivpu_err(vdev, "Failed to create primary preemption buffer\n");
+ return -ENOMEM;
+ }
+
+ cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
+ vdev->fw->secondary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
+ if (!cmdq->secondary_preempt_buf) {
+ ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
+ goto err_free_primary;
+ }
+
+ return 0;
+
+err_free_primary:
+ ivpu_bo_free(cmdq->primary_preempt_buf);
+ cmdq->primary_preempt_buf = NULL;
+ return -ENOMEM;
+}
+
+static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
+ struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ if (cmdq->primary_preempt_buf)
+ ivpu_bo_free(cmdq->primary_preempt_buf);
+ if (cmdq->secondary_preempt_buf)
+ ivpu_bo_free(cmdq->secondary_preempt_buf);
+}
+
+static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
+ struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ int ret;
+
+ /* Use preemption buffer provided by the user space */
+ if (job->primary_preempt_buf)
+ return 0;
+
+ if (!cmdq->primary_preempt_buf) {
+ /* Allocate per command queue preemption buffers */
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+ return ret;
+ }
+
+ /* Use preemption buffers allocated by the kernel */
+ job->primary_preempt_buf = cmdq->primary_preempt_buf;
+ job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
+
+ return 0;
+}
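/*
 * Editor's note (not from the patch): precedence summary for the logic above.
 * A preemption buffer supplied by userspace (wired up later in
 * ivpu_job_prepare_bos_for_submit() via preempt_buffer_index) always wins;
 * otherwise the per-cmdq kernel buffers are allocated lazily on the first
 * job and then reused for every subsequent job on that queue.
 */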
+
+static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
- struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
struct ivpu_device *vdev = file_priv->vdev;
- struct vpu_job_queue_header *jobq_header;
struct ivpu_cmdq *cmdq;
- int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return NULL;
- ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
- goto err_free_cmdq;
- }
-
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto err_erase_xa;
-
- cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
- sizeof(struct vpu_job_queue_entry));
-
- cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
- jobq_header = &cmdq->jobq->header;
- jobq_header->engine_idx = engine;
- jobq_header->head = 0;
- jobq_header->tail = 0;
- wmb(); /* Flush WC buffer for jobq->header */
+ goto err_free_cmdq;
return cmdq;
-err_erase_xa:
- xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
return NULL;
}
-static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+/**
+ * ivpu_cmdq_get_entry_count - Calculate the number of entries in the command queue.
+ * @cmdq: Pointer to the command queue structure.
+ *
+ * Returns the number of entries that can fit in the command queue memory.
+ */
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
{
- if (!cmdq)
- return;
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
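/*
 * Editor's note: a worked example with assumed sizes, not from the patch.
 * The queue BO is SZ_4K (see ivpu_cmdq_alloc() above); assuming a 24-byte
 * vpu_job_queue_header and 64-byte vpu_job_queue_entry slots, the queue
 * holds (4096 - 24) / 64 = 63 entries, and the integer division silently
 * drops any trailing partial slot.
 */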
+
+/**
+ * ivpu_cmdq_get_flags - Get command queue flags based on input flags and test mode.
+ * @vdev: Pointer to the ivpu device structure.
+ * @flags: Input flags to determine the command queue flags.
+ *
+ * Returns the calculated command queue flags, considering both the input flags
+ * and the current test mode settings.
+ */
+static u32 ivpu_cmdq_get_flags(struct ivpu_device *vdev, u32 flags)
+{
+ u32 cmdq_flags = 0;
+
+ if ((flags & DRM_IVPU_CMDQ_FLAG_TURBO) && (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX))
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ /* Test mode can override the TURBO flag coming from the application */
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_ENABLE)
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_DISABLE)
+ cmdq_flags &= ~VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ return cmdq_flags;
+}
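/*
 * Editor's note: resolution-order sketch of the logic above, not from the
 * patch. The test-mode knobs win over the per-queue flag, and TURBO_DISABLE
 * is applied last:
 *
 *	app TURBO, 40xx or newer, no test mode  -> TURBO set
 *	app TURBO on 37xx                       -> ignored (generation gate)
 *	IVPU_TEST_MODE_TURBO_ENABLE             -> TURBO set regardless of app flag
 *	IVPU_TEST_MODE_TURBO_DISABLE            -> TURBO cleared, wins over all
 */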
+
+static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
ivpu_bo_free(cmdq->mem);
- xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority, u32 flags)
{
struct ivpu_device *vdev = file_priv->vdev;
- struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+ struct ivpu_cmdq *cmdq = NULL;
int ret;
lockdep_assert_held(&file_priv->lock);
+ cmdq = ivpu_cmdq_alloc(file_priv);
if (!cmdq) {
- cmdq = ivpu_cmdq_alloc(file_priv, engine);
- if (!cmdq)
- return NULL;
- file_priv->cmdq[engine] = cmdq;
+ ivpu_err(vdev, "Failed to allocate command queue\n");
+ return NULL;
}
+ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+ &file_priv->cmdq_id_next, GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
+ cmdq->priority = priority;
- if (cmdq->db_registered)
- return cmdq;
+ cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ cmdq->jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ cmdq->jobq->header.flags = ivpu_cmdq_get_flags(vdev, flags);
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
+ cmdq->id, file_priv->ctx.id, cmdq->jobq->header.flags);
+ return cmdq;
+
+err_free_cmdq:
+ ivpu_cmdq_free(file_priv, cmdq);
+ return NULL;
+}
+
+static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
+ u8 priority)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ int ret;
- ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
- cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
+ task_pid_nr(current), engine,
+ cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
- return NULL;
+ return ret;
- cmdq->db_registered = true;
+ ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
+ priority);
+ if (ret)
+ return ret;
- return cmdq;
+ return 0;
+}
+
+static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ int ret;
+
+ ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+ GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
+ return ret;
+ }
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
+ cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ else
+ ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
+ cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+
+ if (!ret) {
+ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+ cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+ } else {
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
+
+ return ret;
+}
+
+static void ivpu_cmdq_jobq_reset(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+{
+ jobq->header.head = 0;
+ jobq->header.tail = 0;
+
+ wmb(); /* Flush WC buffer for jobq->header */
}
-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
+static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+ struct ivpu_device *vdev = file_priv->vdev;
+ int ret;
lockdep_assert_held(&file_priv->lock);
- if (cmdq) {
- file_priv->cmdq[engine] = NULL;
- if (cmdq->db_registered)
- ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
+ if (cmdq->db_id)
+ return 0;
- ivpu_cmdq_free(file_priv, cmdq);
+ ivpu_cmdq_jobq_reset(vdev, cmdq->jobq);
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
+ if (ret)
+ return ret;
}
+
+ ret = ivpu_register_db(file_priv, cmdq);
+ if (ret)
+ return ret;
+
+ return 0;
}
-void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- int i;
+ struct ivpu_device *vdev = file_priv->vdev;
+ int ret;
lockdep_assert_held(&file_priv->lock);
- for (i = 0; i < IVPU_NUM_ENGINES; i++)
- ivpu_cmdq_release_locked(file_priv, i);
+ if (!cmdq->db_id)
+ return 0;
+
+ ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
+ if (!ret)
+ ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
+ if (!ret)
+ ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+ cmdq->id, file_priv->ctx.id);
+ }
+
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+
+ return 0;
}
-/*
- * Mark the doorbell as unregistered and reset job queue pointers.
- * This function needs to be called when the VPU hardware is restarted
- * and FW loses job queue state. The next time job queue is used it
- * will be registered again.
- */
-static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
+static inline u8 ivpu_job_to_jsm_priority(u8 priority)
+{
+ if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
+ return VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL;
+
+ return priority - 1;
+}
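/*
 * Editor's note: assuming the uapi and JSM enum layouts
 * (DRM_IVPU_JOB_PRIORITY_* = 0..4 for DEFAULT/IDLE/NORMAL/FOCUS/REALTIME,
 * VPU_JOB_SCHEDULING_PRIORITY_BAND_* = 0..3 for IDLE/NORMAL/FOCUS/REALTIME),
 * the mapping above works out to:
 *
 *	DEFAULT (0) -> NORMAL band (1)
 *	IDLE (1) -> 0,  NORMAL (2) -> 1,  FOCUS (3) -> 2,  REALTIME (4) -> 3
 */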
+
+static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+ ivpu_cmdq_unregister(file_priv, cmdq);
+ xa_erase(&file_priv->cmdq_xa, cmdq->id);
+ ivpu_cmdq_free(file_priv, cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_priv, u8 priority)
+{
+ struct ivpu_cmdq *cmdq;
+ unsigned long id;
lockdep_assert_held(&file_priv->lock);
- if (cmdq) {
- cmdq->db_registered = false;
- cmdq->jobq->header.head = 0;
- cmdq->jobq->header.tail = 0;
- wmb(); /* Flush WC buffer for jobq header */
+ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
+ if (cmdq->is_legacy && cmdq->priority == priority)
+ break;
+
+ if (!cmdq) {
+ cmdq = ivpu_cmdq_create(file_priv, priority, 0);
+ if (!cmdq)
+ return NULL;
+ cmdq->is_legacy = true;
+ }
+
+ return cmdq;
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq_id)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
+ if (!cmdq) {
+ ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id);
+ return NULL;
}
+
+ return cmdq;
+}
+
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+{
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_destroy(file_priv, cmdq);
}
-static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
+/*
+ * Mark the doorbells as unregistered.
+ * This function needs to be called when the VPU hardware is restarted
+ * and the FW loses the job queue state. The next time a job queue is
+ * used, it will be registered again.
+ */
+static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
- int i;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
mutex_lock(&file_priv->lock);
- for (i = 0; i < IVPU_NUM_ENGINES; i++)
- ivpu_cmdq_reset_locked(file_priv, i);
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
mutex_unlock(&file_priv->lock);
}
@@ -172,10 +395,29 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
- ivpu_cmdq_reset_all(file_priv);
+ ivpu_cmdq_reset(file_priv);
mutex_unlock(&vdev->context_list_lock);
+}
+void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
+
+ lockdep_assert_held(&file_priv->lock);
+ ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
+
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_unregister(file_priv, cmdq);
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
+ ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+ ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);
+
+ file_priv->aborted = true;
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -188,17 +430,28 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
/* Check if there is space left in job queue */
if (next_entry == header->head) {
- ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
- job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
+ ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
+ job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
return -EBUSY;
}
- entry = &cmdq->jobq->job[tail];
+ entry = &cmdq->jobq->slot[tail].job;
entry->batch_buf_addr = job->cmd_buf_vpu_addr;
entry->job_id = job->job_id;
entry->flags = 0;
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
+
+ if (job->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
+ }
+
+ if (job->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
+ }
+
wmb(); /* Ensure that tail is updated after filling entry */
header->tail = next_entry;
wmb(); /* Flush WC buffer for jobq header */
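/*
 * Editor's note: a sketch of the queue-full test above, not from the patch,
 * assuming next_entry = (tail + 1) % cmdq->entry_count as computed just
 * before this hunk. The ring deliberately keeps one slot unused so that
 * head == tail always means "empty" and never becomes ambiguous with "full":
 *
 *	next_entry = (tail + 1) % cmdq->entry_count;
 *	if (next_entry == header->head)
 *		return -EBUSY;	// full: at most entry_count - 1 jobs in flight
 */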
@@ -254,8 +507,8 @@ static void ivpu_job_destroy(struct ivpu_job *job)
struct ivpu_device *vdev = job->vdev;
u32 i;
- ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
- job->job_id, job->file_priv->ctx.id, job->engine_idx);
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d cmdq_id %u engine %d",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx);
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
@@ -281,12 +534,13 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->bo_count = bo_count;
job->done_fence = ivpu_fence_create(vdev);
if (!job->done_fence) {
- ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
+ ivpu_err(vdev, "Failed to create a fence\n");
goto err_free_job;
}
job->file_priv = ivpu_file_priv_get(file_priv);
+ trace_job("create", job);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
return job;
@@ -295,27 +549,94 @@ err_free_job:
return NULL;
}
-static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
{
struct ivpu_job *job;
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
job = xa_erase(&vdev->submitted_jobs_xa, job_id);
+ if (xa_empty(&vdev->submitted_jobs_xa) && job) {
+ vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
+ vdev->busy_time);
+ }
+
+ return job;
+}
+
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ switch (job_status) {
+ case VPU_JSM_STATUS_PROCESSING_ERR:
+ case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX:
+ {
+ struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id);
+
+ if (!job)
+ return false;
+
+ /* Trigger an engine reset */
+ guard(mutex)(&job->file_priv->lock);
+
+ job->job_status = job_status;
+
+ if (job->file_priv->has_mmu_faults)
+ return false;
+
+ /*
+ * Mark the context as faulty and defer destruction of the job to the
+ * context abort worker, so that MMU faults and jobs returning a context
+ * violation status are synchronized and handled in the same way.
+ */
+ job->file_priv->has_mmu_faults = true;
+ queue_work(system_percpu_wq, &vdev->context_abort_work);
+ return true;
+ }
+ default:
+ /* Complete job with error status, engine reset not required */
+ break;
+ }
+
+ return false;
+}
+
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ struct ivpu_job *job;
+
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
if (!job)
return -ENOENT;
- if (job->file_priv->has_mmu_faults)
- job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ ivpu_job_remove_from_submitted_jobs(vdev, job_id);
- job->bos[CMD_BUF_IDX]->job_status = job_status;
+ if (job->job_status == VPU_JSM_STATUS_SUCCESS) {
+ if (job->file_priv->has_mmu_faults)
+ job->job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ else
+ job->job_status = job_status;
+ }
+
+ job->bos[CMD_BUF_IDX]->job_status = job->job_status;
dma_fence_signal(job->done_fence);
- ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ trace_job("done", job);
+ ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx,
+ job->job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
ivpu_rpm_put(vdev);
+
+ if (!xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
+
return 0;
}
@@ -324,42 +645,75 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
struct ivpu_job *job;
unsigned long id;
+ mutex_lock(&vdev->submitted_jobs_lock);
+
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
-static int ivpu_job_submit(struct ivpu_job *job)
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
+{
+ struct ivpu_job *job;
+ unsigned long id;
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->ctx.id == ctx_id && job->cmdq_id == cmdq_id)
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
+}
+
+static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
- struct xa_limit job_id_range;
struct ivpu_cmdq *cmdq;
+ bool is_first_job;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
+ mutex_lock(&vdev->submitted_jobs_lock);
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
+ if (cmdq_id == 0)
+ cmdq = ivpu_cmdq_acquire_legacy(file_priv, priority);
+ else
+ cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
- file_priv->ctx.id, job->engine_idx);
ret = -EINVAL;
- goto err_unlock_file_priv;
+ goto err_unlock;
}
- job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
- job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
+ ret = ivpu_cmdq_register(file_priv, cmdq);
+ if (ret) {
+ ivpu_err(vdev, "Failed to register command queue: %d\n", ret);
+ goto err_unlock;
+ }
- xa_lock(&vdev->submitted_jobs_xa);
- ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+ ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
if (ret) {
+ ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n",
+ job->job_id, ret);
+ goto err_unlock;
+ }
+
+ job->cmdq_id = cmdq->id;
+
+ is_first_job = xa_empty(&vdev->submitted_jobs_xa);
+ ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
+ if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
- goto err_unlock_submitted_jobs_xa;
+ goto err_unlock;
}
ret = ivpu_cmdq_push_job(cmdq, job);
@@ -373,36 +727,39 @@ static int ivpu_job_submit(struct ivpu_job *job)
wmb(); /* Flush WC buffer for jobq header */
} else {
ivpu_cmdq_ring_db(vdev, cmdq);
+ if (is_first_job)
+ vdev->busy_start_ts = ktime_get();
}
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
- job->job_id, file_priv->ctx.id, job->engine_idx,
+ trace_job("submit", job);
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d cmdq_id %u engine %d prio %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, cmdq->id, job->engine_idx, cmdq->priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
- xa_unlock(&vdev->submitted_jobs_xa);
-
mutex_unlock(&file_priv->lock);
- if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ }
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
return 0;
err_erase_xa:
- __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
- xa_unlock(&vdev->submitted_jobs_xa);
-err_unlock_file_priv:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock:
mutex_unlock(&file_priv->lock);
+ mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
- u32 buf_count, u32 commands_offset)
+ u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ww_acquire_ctx acquire_ctx;
enum dma_resv_usage usage;
@@ -413,40 +770,58 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
for (i = 0; i < buf_count; i++) {
struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
- if (!obj)
+ if (!obj) {
+ ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n",
+ buf_handles[i]);
return -ENOENT;
+ }
job->bos[i] = to_ivpu_bo(obj);
- ret = ivpu_bo_pin(job->bos[i]);
+ ret = ivpu_bo_bind(job->bos[i]);
if (ret)
return ret;
}
bo = job->bos[CMD_BUF_IDX];
if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
- ivpu_warn(vdev, "Buffer is already in use\n");
+ ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n");
return -EBUSY;
}
if (commands_offset >= ivpu_bo_size(bo)) {
- ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n",
+ commands_offset, ivpu_bo_size(bo));
return -EINVAL;
}
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+ if (preempt_buffer_index) {
+ struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
+
+ if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
+ ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n");
+ return -EINVAL;
+ }
+ if (ivpu_bo_is_mappable(preempt_bo)) {
+ ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n");
+ return -EINVAL;
+ }
+ job->primary_preempt_buf = preempt_bo;
+ }
+
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
&acquire_ctx);
if (ret) {
- ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
+ ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret);
return ret;
}
for (i = 0; i < buf_count; i++) {
ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
if (ret) {
- ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
}
}
@@ -464,40 +839,20 @@ unlock_reservations:
return ret;
}
-int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
+ u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
+ u32 preempt_buffer_index, u8 priority)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
- struct drm_ivpu_submit *params = data;
struct ivpu_job *job;
u32 *buf_handles;
int idx, ret;
- if (params->engine > DRM_IVPU_ENGINE_COPY)
- return -EINVAL;
-
- if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
- return -EINVAL;
-
- if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
- return -EINVAL;
-
- if (!IS_ALIGNED(params->commands_offset, 8))
- return -EINVAL;
-
- if (!file_priv->ctx.id)
- return -EINVAL;
-
- if (file_priv->has_mmu_faults)
- return -EBADFD;
-
- buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
+ buf_handles = kcalloc(buffer_count, sizeof(u32), GFP_KERNEL);
if (!buf_handles)
return -ENOMEM;
- ret = copy_from_user(buf_handles,
- (void __user *)params->buffers_ptr,
- params->buffer_count * sizeof(u32));
+ ret = copy_from_user(buf_handles, buffers_ptr, buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
goto err_free_handles;
@@ -508,25 +863,22 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_free_handles;
}
- ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
- file_priv->ctx.id, params->buffer_count);
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u cmdq_id %u buf_count %u\n",
+ file_priv->ctx.id, cmdq_id, buffer_count);
- job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
- ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
- params->commands_offset);
- if (ret) {
- ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
+ preempt_buffer_index);
+ if (ret)
goto err_destroy_job;
- }
down_read(&vdev->pm->reset_lock);
- ret = ivpu_job_submit(job);
+ ret = ivpu_job_submit(job, priority, cmdq_id);
up_read(&vdev->pm->reset_lock);
if (ret)
goto err_signal_fence;
@@ -546,12 +898,176 @@ err_free_handles:
return ret;
}
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_submit *args = data;
+ u8 priority;
+
+ if (args->engine != DRM_IVPU_ENGINE_COMPUTE) {
+ ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine);
+ return -EINVAL;
+ }
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
+ ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
+ ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(args->commands_offset, 8)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
+ return -EINVAL;
+ }
+
+ if (!file_priv->ctx.id) {
+ ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
+ return -EINVAL;
+ }
+
+ if (file_priv->has_mmu_faults) {
+ ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
+ return -EBADFD;
+ }
+
+ priority = ivpu_job_to_jsm_priority(args->priority);
+
+ return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
+ (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
+}
+
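/*
 * Editor's note: a hedged userspace sketch of the legacy submit path
 * validated above, not part of the patch. Field and ioctl names follow
 * uapi/drm/ivpu_accel.h as referenced in this diff; treat the include path
 * and exact struct layout as assumptions.
 */
#include <err.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/ivpu_accel.h>

static void submit_one_job(int fd, uint32_t cmd_buf_handle)
{
	uint32_t handles[] = { cmd_buf_handle };	/* index 0 is the command buffer */
	struct drm_ivpu_submit args = {
		.buffers_ptr = (uintptr_t)handles,
		.buffer_count = 1,
		.engine = DRM_IVPU_ENGINE_COMPUTE,
		.commands_offset = 0,			/* must be 8-byte aligned */
		.priority = DRM_IVPU_JOB_PRIORITY_DEFAULT,
	};

	if (drmIoctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args))
		err(1, "DRM_IOCTL_IVPU_SUBMIT");
}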
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_submit *args = data;
+
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
+ return -ENODEV;
+ }
+
+ if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) {
+ ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
+ ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->preempt_buffer_index >= args->buffer_count) {
+ ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n",
+ args->preempt_buffer_index);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(args->commands_offset, 8)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
+ return -EINVAL;
+ }
+
+ if (!file_priv->ctx.id) {
+ ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
+ return -EINVAL;
+ }
+
+ if (file_priv->has_mmu_faults) {
+ ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
+ return -EBADFD;
+ }
+
+ return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
+ (void __user *)args->buffers_ptr, args->commands_offset,
+ args->preempt_buffer_index, 0);
+}
+
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_create *args = data;
+ struct ivpu_cmdq *cmdq;
+ int ret;
+
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
+ return -ENODEV;
+ }
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
+ ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
+ return -EINVAL;
+ }
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), args->flags);
+ if (cmdq)
+ args->cmdq_id = cmdq->id;
+
+ mutex_unlock(&file_priv->lock);
+
+ ivpu_rpm_put(vdev);
+
+ return cmdq ? 0 : -ENOMEM;
+}
+
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_destroy *args = data;
+ struct ivpu_cmdq *cmdq;
+ u32 cmdq_id = 0;
+ int ret;
+
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
+ return -ENODEV;
+ }
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
+ if (!cmdq || cmdq->is_legacy) {
+ ret = -ENOENT;
+ } else {
+ cmdq_id = cmdq->id;
+ ivpu_cmdq_destroy(file_priv, cmdq);
+ ret = 0;
+ }
+
+ mutex_unlock(&file_priv->lock);
+
+ /* Abort any pending jobs only if cmdq was destroyed */
+ if (!ret)
+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+
+ ivpu_rpm_put(vdev);
+
+ return ret;
+}
+
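/*
 * Editor's note: illustrative lifecycle of the three new ioctls, a sketch
 * under the assumption that the uapi structs expose the fields validated
 * above (cmdq_id, priority, flags, buffers_ptr, buffer_count) and that the
 * DRM_IOCTL_IVPU_CMDQ_* wrapper macros exist with these names.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/ivpu_accel.h>

static int run_on_managed_cmdq(int fd, uint32_t cmd_buf_handle)
{
	struct drm_ivpu_cmdq_create create = { .priority = DRM_IVPU_JOB_PRIORITY_NORMAL };
	uint32_t handles[] = { cmd_buf_handle };
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_IVPU_CMDQ_CREATE, &create);
	if (ret)
		return ret;	/* -ENODEV if DRM_IVPU_CAP_MANAGE_CMDQ is missing */

	struct drm_ivpu_cmdq_submit submit = {
		.cmdq_id = create.cmdq_id,
		.buffers_ptr = (uintptr_t)handles,
		.buffer_count = 1,
	};
	ret = drmIoctl(fd, DRM_IOCTL_IVPU_CMDQ_SUBMIT, &submit);

	/* Destroying the queue also aborts any jobs still pending on it */
	struct drm_ivpu_cmdq_destroy destroy = { .cmdq_id = create.cmdq_id };
	drmIoctl(fd, DRM_IOCTL_IVPU_CMDQ_DESTROY, &destroy);
	return ret;
}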
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
struct vpu_jsm_msg *jsm_msg)
{
struct vpu_ipc_msg_payload_job_done *payload;
- int ret;
if (!jsm_msg) {
ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -564,9 +1080,12 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
- if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
- ivpu_start_job_timeout_detection(vdev);
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+ if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status))
+ /* No engine error, complete the job normally */
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -579,3 +1098,56 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
+
+void ivpu_context_abort_work_fn(struct work_struct *work)
+{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+ struct ivpu_file_priv *file_priv;
+ struct ivpu_job *job;
+ unsigned long ctx_id;
+ unsigned long id;
+
+ if (drm_WARN_ON(&vdev->drm, pm_runtime_get_if_active(vdev->drm.dev) <= 0))
+ return;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ if (ivpu_jsm_reset_engine(vdev, 0))
+ goto runtime_put;
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ if (!file_priv->has_mmu_faults || file_priv->aborted)
+ continue;
+
+ mutex_lock(&file_priv->lock);
+ ivpu_context_abort_locked(file_priv);
+ mutex_unlock(&file_priv->lock);
+ }
+ mutex_unlock(&vdev->context_list_lock);
+
+ /*
+ * We will not receive new MMU event interrupts until the existing events are
+ * discarded. However, we want to discard them only after aborting the faulty
+ * context, to avoid generating new faults from that context.
+ */
+ ivpu_mmu_discard_events(vdev);
+
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ goto runtime_put;
+
+ if (ivpu_jsm_hws_resume_engine(vdev, 0))
+ goto runtime_put;
+ /*
+ * In hardware scheduling mode the NPU has already stopped processing jobs
+ * and won't send us any further notifications, so we have to free the
+ * job-related resources and notify userspace ourselves.
+ */
+ mutex_lock(&vdev->submitted_jobs_lock);
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->aborted)
+ ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+ mutex_unlock(&vdev->submitted_jobs_lock);
+
+runtime_put:
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index ca4984071cc7..3ab61e6a5616 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -15,51 +15,78 @@ struct ivpu_device;
struct ivpu_file_priv;
/**
- * struct ivpu_cmdq - Object representing device queue used to send jobs.
- * @jobq: Pointer to job queue memory shared with the device
- * @mem: Memory allocated for the job queue, shared with device
- * @entry_count Number of job entries in the queue
- * @db_id: Doorbell assigned to this job queue
- * @db_registered: True if doorbell is registered in device
+ * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU.
+ * Tracks queue memory, preemption buffers, and metadata for job management.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @primary_preempt_buf: Primary preemption buffer for this queue (optional)
+ * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional)
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count: Number of job entries in the queue
+ * @id: Unique command queue ID
+ * @db_id: Doorbell ID assigned to this job queue
+ * @priority: Priority level of the command queue
+ * @is_legacy: True if this is a legacy command queue
*/
struct ivpu_cmdq {
struct vpu_job_queue *jobq;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *mem;
u32 entry_count;
+ u32 id;
u32 db_id;
- bool db_registered;
+ u8 priority;
+ bool is_legacy;
};
/**
- * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
- * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
- * This is a unit of execution, and be tracked by the job_id for
- * any status reporting from VPU FW through IPC JOB RET/DONE message.
- * @file_priv: The client that submitted this job
- * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
- * @status: Status of the Job from IPC JOB RET/DONE message
- * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job
- * @submit_status_offset: Offset within batch buffer where job completion handler
- will update the job status
+ * struct ivpu_job - Representing a batch or DMA buffer submitted to the VPU.
+ * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW.
+ * The structure holds all resources and metadata needed for job submission, execution,
+ * and completion handling.
+ * @vdev: Pointer to the VPU device
+ * @file_priv: The client context that submitted this job
+ * @done_fence: Fence signaled when job completes
+ * @cmd_buf_vpu_addr: VPU address of the command buffer for this job
+ * @cmdq_id: Command queue ID used for submission
+ * @job_id: Unique job ID for tracking and status reporting
+ * @engine_idx: Engine index for job execution
+ * @job_status: Status reported by firmware for this job
+ * @primary_preempt_buf: Primary preemption buffer for job
+ * @secondary_preempt_buf: Secondary preemption buffer for job (optional)
+ * @bo_count: Number of buffer objects associated with this job
+ * @bos: Array of buffer objects used by the job (batch buffer is at index 0)
*/
struct ivpu_job {
struct ivpu_device *vdev;
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
u64 cmd_buf_vpu_addr;
+ u32 cmdq_id;
u32 job_id;
u32 engine_idx;
+ u32 job_status;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
size_t bo_count;
struct ivpu_bo *bos[] __counted_by(bo_count);
};
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status);
+void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 8cea0dd731b9..0256b2dfefc1 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
@@ -48,9 +50,10 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
- IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
@@ -103,14 +106,10 @@ int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret) {
- ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
- return ret;
- }
-
- ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
+ if (ret)
+ ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
- return 0;
+ return ret;
}
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
@@ -123,14 +122,10 @@ int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret) {
- ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
- return ret;
- }
-
- ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);
- return 0;
+ return ret;
}
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
@@ -139,7 +134,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.query_engine_hb.engine_idx = engine;
@@ -162,15 +157,17 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_reset.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret)
+ if (ret) {
ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
+ ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
+ }
return ret;
}
@@ -181,7 +178,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_preempt.engine_idx = engine;
@@ -204,7 +201,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
- VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
command, ret);
@@ -255,11 +252,16 @@ int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
struct vpu_jsm_msg resp;
+ int ret;
req.payload.ssid_release.host_ssid = host_ssid;
- return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
- VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);
+
+ return ret;
}
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
@@ -273,11 +275,288 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
req.payload.pwr_d0i3_enter.send_response = 1;
- ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
- &resp, VPU_IPC_CHAN_GEN_CMD,
- vdev->timeout.d0i3_entry_msg);
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
+ VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
if (ret)
return ret;
return ivpu_hw_wait_for_idle(vdev);
}
+
+int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
+ u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_create_cmdq.host_ssid = ctx_id;
+ req.payload.hws_create_cmdq.process_id = pid;
+ req.payload.hws_create_cmdq.engine_idx = engine;
+ req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
+ req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
+ req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
+ req.payload.hws_create_cmdq.cmdq_size = cmdq_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
+ req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
+ u64 cmdq_base, u32 cmdq_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_register_db.db_id = db_id;
+ req.payload.hws_register_db.host_ssid = ctx_id;
+ req.payload.hws_register_db.cmdq_id = cmdq_id;
+ req.payload.hws_register_db.cmdq_base = cmdq_base;
+ req.payload.hws_register_db.cmdq_size = cmdq_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
+
+ return ret;
+}
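Creating an HWS queue and registering its doorbell are two separate JSM round trips that must describe the same ring; a hedged bring-up sketch (helper name hypothetical, modeled on how the driver pairs these calls; error handling trimmed):

static int ivpu_hws_cmdq_bind_sketch(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
				       task_pid_nr(current), VPU_ENGINE_COMPUTE,
				       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	if (ret)
		return ret;

	/* The doorbell must describe the same ring the queue was created with. */
	return ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
					cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
}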
+
+int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine != VPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ req.payload.hws_resume_engine.engine_idx = engine;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
+ ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
+ }
+
+ return ret;
+}
+
+int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
+ u32 priority)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
+ req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
+ req.payload.hws_set_context_sched_properties.priority_band = priority;
+ req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
+ req.payload.hws_set_context_sched_properties.in_process_priority = 0;
+ req.payload.hws_set_context_sched_properties.context_quantum = 20000;
+ req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
+ req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
+ u64 vpu_log_buffer_va)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
+ req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
+ req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
+ req.payload.hws_set_scheduling_log.notify_index = 0;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
+ struct vpu_jsm_msg resp;
+ struct ivpu_hw_info *hw = vdev->hw;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
+ &req.payload.hws_priority_band_setup;
+ int ret;
+
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ setup->grace_period[band] = hw->hws.grace_period[band];
+ setup->process_grace_period[band] = hw->hws.process_grace_period[band];
+ setup->process_quantum[band] = hw->hws.process_quantum[band];
+ }
+ setup->normal_band_percentage = 10;
+
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
+ u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
+ req.payload.metric_streamer_start.sampling_rate = sampling_rate;
+ req.payload.metric_streamer_start.buffer_addr = buffer_addr;
+ req.payload.metric_streamer_start.buffer_size = buffer_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
+ u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
+ req.payload.metric_streamer_update.buffer_addr = buffer_addr;
+ req.payload.metric_streamer_update.buffer_size = buffer_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
+ return ret;
+ }
+
+ if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
+ ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
+ resp.payload.metric_streamer_done.bytes_written, buffer_size);
+ return -EOVERFLOW;
+ }
+
+ *bytes_written = resp.payload.metric_streamer_done.bytes_written;
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
+ u64 buffer_size, u32 *sample_size, u64 *info_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
+ req.payload.metric_streamer_start.buffer_addr = buffer_addr;
+ req.payload.metric_streamer_start.buffer_size = buffer_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
+ return ret;
+ }
+
+ if (!resp.payload.metric_streamer_done.sample_size) {
+ ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
+ return -EBADMSG;
+ }
+
+ if (sample_size)
+ *sample_size = resp.payload.metric_streamer_done.sample_size;
+ if (info_size)
+ *info_size = resp.payload.metric_streamer_done.bytes_written;
+
+ return ret;
+}
+
+int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
+ struct vpu_jsm_msg resp;
+
+ req.payload.pwr_dct_control.dct_active_us = active_us;
+ req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
+
+ return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+}
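Judging by the _us suffixes, DCT (duty-cycle throttling) takes active/inactive window lengths in microseconds; a standalone illustration of the resulting duty cycle (values made up):

#include <stdio.h>

int main(void)
{
	unsigned int active_us = 7000, inactive_us = 3000; /* 10 ms period, made up */
	unsigned int duty_pct = 100 * active_us / (active_us + inactive_us);

	printf("duty cycle: %u%%\n", duty_pct); /* prints 70% */
	return 0;
}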
+
+int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
+ struct vpu_jsm_msg resp;
+
+ return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+}
+
+int ivpu_jsm_state_dump(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };
+
+ return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.state_dump_msg);
+}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
index ae75e5dbcc41..9e84d3526a14 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.h
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_JSM_MSG_H__
@@ -23,4 +23,26 @@ int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 tra
u64 trace_hw_component_mask);
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev);
+int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
+ u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size);
+int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
+int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
+ u64 cmdq_base, u32 cmdq_size);
+int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine);
+int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
+ u32 priority);
+int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
+ u64 vpu_log_buffer_va);
+int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev);
+int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
+ u64 sampling_rate, u64 buffer_addr, u64 buffer_size);
+int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask);
+int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
+ u64 buffer_addr, u64 buffer_size, u64 *bytes_written);
+int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
+ u64 buffer_size, u32 *sample_size, u64 *info_size);
+int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
+int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
+int ivpu_jsm_state_dump(struct ivpu_device *vdev);
+
#endif
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 2e46b322c450..e1baf6b64935 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/circ_buf.h>
@@ -20,6 +20,12 @@
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
+#define IVPU_MMU_REG_CR0_ATSCHK_MASK BIT(4)
+#define IVPU_MMU_REG_CR0_CMDQEN_MASK BIT(3)
+#define IVPU_MMU_REG_CR0_EVTQEN_MASK BIT(2)
+#define IVPU_MMU_REG_CR0_PRIQEN_MASK BIT(1)
+#define IVPU_MMU_REG_CR0_SMMUEN_MASK BIT(0)
+
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
@@ -141,12 +147,6 @@
#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
-#define IVPU_MMU_CR0_ATSCHK BIT(4)
-#define IVPU_MMU_CR0_CMDQEN BIT(3)
-#define IVPU_MMU_CR0_EVTQEN BIT(2)
-#define IVPU_MMU_CR0_PRIQEN BIT(1)
-#define IVPU_MMU_CR0_SMMUEN BIT(0)
-
#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
@@ -519,7 +519,8 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
if (ret)
return ret;
- clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
@@ -567,7 +568,8 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
int ret;
memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
- clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
mmu->cmdq.prod = 0;
mmu->cmdq.cons = 0;
@@ -594,7 +596,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
- val = IVPU_MMU_CR0_CMDQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -615,12 +617,12 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
- val |= IVPU_MMU_CR0_EVTQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
- val |= IVPU_MMU_CR0_ATSCHK;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -629,7 +631,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- val |= IVPU_MMU_CR0_SMMUEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
return ivpu_mmu_reg_write_cr0(vdev, val);
}
@@ -661,7 +663,8 @@ static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
WRITE_ONCE(entry[1], str[1]);
WRITE_ONCE(entry[0], str[0]);
- clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}
@@ -693,7 +696,7 @@ unlock:
return ret;
}
-static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
+static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
@@ -705,40 +708,40 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
return -EINVAL;
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
-
- if (cd_dma != 0) {
- cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
- FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
- IVPU_MMU_CD_0_TCR_EPD1 |
- IVPU_MMU_CD_0_AA64 |
- IVPU_MMU_CD_0_R |
- IVPU_MMU_CD_0_ASET |
- IVPU_MMU_CD_0_V;
- cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
- cd[2] = 0;
- cd[3] = 0x0000000000007444;
-
- /* For global context generate memory fault on VPU */
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
- cd[0] |= IVPU_MMU_CD_0_A;
- } else {
- memset(cd, 0, sizeof(cd));
- }
+ drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid);
+
+ cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
+ FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
+ IVPU_MMU_CD_0_TCR_EPD1 |
+ IVPU_MMU_CD_0_AA64 |
+ IVPU_MMU_CD_0_R |
+ IVPU_MMU_CD_0_ASET;
+ cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
+ cd[2] = 0;
+ cd[3] = 0x0000000000007444;
+
+ /* For global and reserved contexts generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID || ssid == IVPU_RESERVED_CONTEXT_MMU_SSID)
+ cd[0] |= IVPU_MMU_CD_0_A;
+
+ if (valid)
+ cd[0] |= IVPU_MMU_CD_0_V;
WRITE_ONCE(entry[1], cd[1]);
WRITE_ONCE(entry[2], cd[2]);
WRITE_ONCE(entry[3], cd[3]);
WRITE_ONCE(entry[0], cd[0]);
- clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
- ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
- cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
+ ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
mutex_lock(&mmu->lock);
if (!mmu->on)
@@ -746,38 +749,18 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
- goto unlock;
+ goto err_invalidate;
ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ goto err_invalidate;
unlock:
mutex_unlock(&mmu->lock);
- return ret;
-}
-
-static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
- if (ret)
- ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
-{
- int ret;
-
- if (ssid == 0) {
- ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
- return -EINVAL;
- }
-
- ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
- if (ret)
- ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
+ return 0;
+err_invalidate:
+ WRITE_ONCE(entry[0], 0);
+ mutex_unlock(&mmu->lock);
return ret;
}
@@ -804,12 +787,6 @@ int ivpu_mmu_init(struct ivpu_device *vdev)
return ret;
}
- ret = ivpu_mmu_cd_add_gbl(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
- return ret;
- }
-
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
@@ -874,8 +851,9 @@ static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
u64 in_addr = ((u64)event[5]) << 32 | event[4];
u32 sid = event[1];
- ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
- op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
+ ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
+ op, ivpu_mmu_event_to_str(op), ssid, sid,
+ event[2], event[3], in_addr, fetch_addr);
}
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
@@ -892,25 +870,107 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
return evt;
}
+static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(IVPU_MMU_REG_CR0);
+
+ if (enable)
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ else
+ val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ REGV_WR32(IVPU_MMU_REG_CR0, val);
+
+ return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
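REG_SET_FLD()/REG_CLR_FLD() replace the removed hand-built CR0 bit constants; for the single-bit fields used here they should reduce to a mask OR/AND-NOT (a simplified, assumed expansion; the real helpers also cover multi-bit fields via the *_MASK definitions):

#define SKETCH_SET_FLD(reg, fld, val) ((val) | reg##_##fld##_MASK)
#define SKETCH_CLR_FLD(reg, fld, val) ((val) & ~reg##_##fld##_MASK)

/* e.g. SKETCH_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, 0) == IVPU_MMU_REG_CR0_EVTQEN_MASK */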
+
+static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, true);
+}
+
+static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, false);
+}
+
+void ivpu_mmu_discard_events(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+
+ mutex_lock(&mmu->lock);
+ /*
+ * Disable event queue (stop MMU from updating the producer)
+ * to allow synchronization of consumer and producer indexes
+ */
+ ivpu_mmu_evtq_disable(vdev);
+
+ vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+
+ ivpu_mmu_evtq_enable(vdev);
+
+ drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);
+
+ mutex_unlock(&mmu->lock);
+}
+
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 val;
+
+ if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ mutex_lock(&mmu->lock);
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
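+ /* Clearing CD.R (the fault-record enable bit) stops the SMMU from queuing
+ * new fault events for this SSID; existing translations keep working. */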
+ val = READ_ONCE(entry[0]);
+ val &= ~IVPU_MMU_CD_0_R;
+ WRITE_ONCE(entry[0], val);
+
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ ivpu_mmu_cmdq_sync(vdev);
+
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+}
+
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
+ struct ivpu_file_priv *file_priv;
u32 *event;
u32 ssid;
ivpu_dbg(vdev, IRQ, "MMU event queue\n");
- while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
- ivpu_mmu_dump_event(vdev, event);
-
- ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ while ((event = ivpu_mmu_get_event(vdev))) {
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID ||
+ ssid == IVPU_RESERVED_CONTEXT_MMU_SSID) {
+ ivpu_mmu_dump_event(vdev, event);
ivpu_pm_trigger_recovery(vdev, "MMU event");
return;
}
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv) {
+ if (!READ_ONCE(file_priv->has_mmu_faults)) {
+ ivpu_mmu_dump_event(vdev, event);
+ WRITE_ONCE(file_priv->has_mmu_faults, true);
+ }
+ }
}
+
+ queue_work(system_percpu_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
@@ -958,12 +1018,12 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}
-int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
+int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
- return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
+ return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);
}
-void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
+void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid)
{
- ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
+ ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false);
}
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index 6fa35c240710..1ce7529746ad 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -40,12 +40,14 @@ struct ivpu_mmu_info {
int ivpu_mmu_init(struct ivpu_device *vdev);
void ivpu_mmu_disable(struct ivpu_device *vdev);
int ivpu_mmu_enable(struct ivpu_device *vdev);
-int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
-void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid);
+int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
+void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid);
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
+void ivpu_mmu_discard_events(struct ivpu_device *vdev);
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 128aef8e5a19..87ad593ef47d 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -24,6 +24,7 @@
#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
+#define IVPU_MMU_ENTRY_FLAG_RO BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
@@ -89,19 +90,6 @@ static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_
}
}
-static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
-{
- dma_addr_t pgd_dma;
-
- pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
- if (!pgtable->pgd_dma_ptr)
- return -ENOMEM;
-
- pgtable->pgd_dma = pgd_dma;
-
- return 0;
-}
-
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
@@ -139,6 +127,27 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt
}
ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+ pgtable->pgd_dma_ptr = NULL;
+ pgtable->pgd_dma = 0;
+}
+
+static u64*
+ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
+ dma_addr_t pgd_dma;
+
+ if (pgd_dma_ptr)
+ return pgd_dma_ptr;
+
+ pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
+ if (!pgd_dma_ptr)
+ return NULL;
+
+ pgtable->pgd_dma_ptr = pgd_dma_ptr;
+ pgtable->pgd_dma = pgd_dma;
+
+ return pgd_dma_ptr;
}
static u64*
@@ -236,6 +245,12 @@ ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+ drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);
+
+ /* Allocate PGD - first level page table if needed */
+ if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
+ return -ENOMEM;
+
/* Allocate PUD - second level page table if needed */
if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
return -ENOMEM;
@@ -319,6 +334,91 @@ ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
return 0;
}
+static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr)
+{
+ int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+ int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
+}
+
+static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr)
+{
+ int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+ int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
+}
+
+static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr)
+{
+ u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
+ u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
+ u64 offset = 0;
+
+ ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);
+
+ while (start + offset < end) {
+ ivpu_mmu_context_split_page(vdev, ctx, start + offset);
+ offset += IVPU_MMU_PAGE_SIZE;
+ }
+}
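ALIGN_DOWN/ALIGN bound the enclosing 64K run, and the loop clears the CONT flag on each 4K PTE inside it; a standalone check of the bounds, assuming the usual 4K base page and 64K contiguous-run sizes:

#include <assert.h>
#include <stdint.h>

#define PAGE_SZ 0x1000ULL  /* assumed IVPU_MMU_PAGE_SIZE (4K) */
#define CONT_SZ 0x10000ULL /* assumed IVPU_MMU_CONT_PAGES_SIZE (64K) */

int main(void)
{
	uint64_t vpu_addr = 0x80035000ULL; /* made up, not 64K-aligned */
	uint64_t start = vpu_addr & ~(CONT_SZ - 1);                /* ALIGN_DOWN */
	uint64_t end = (vpu_addr + CONT_SZ - 1) & ~(CONT_SZ - 1);  /* ALIGN */

	/* The split loop walks the 16 base pages of the enclosing 64K run. */
	assert((end - start) / PAGE_SZ == 16);
	return 0;
}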
+
+int
+ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+ size_t size)
+{
+ u64 end = vpu_addr + size;
+ size_t size_left = size;
+ int ret;
+
+ if (size == 0)
+ return 0;
+
+ if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
+ return -EINVAL;
+
+ mutex_lock(&ctx->lock);
+
+ ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, vpu_addr, size);
+
+ if (!ivpu_disable_mmu_cont_pages) {
+ /* Split 64K contiguous page at the beginning if needed */
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
+ ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);
+
+ /* Split 64K contiguous page at the end if needed */
+ if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
+ ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
+ }
+
+ while (size_left) {
+ if (vpu_addr < end)
+ ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);
+
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ size_left -= IVPU_MMU_PAGE_SIZE;
+ }
+
+ /* Ensure page table modifications are flushed from wc buffers to memory */
+ wmb();
+
+ mutex_unlock(&ctx->lock);
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+
+ return 0;
+}
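A hedged call-site sketch (address made up): a window that is not 64K-aligned exercises the split path above before the RO flag is applied:

static int ivpu_set_ro_sketch(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	/* 0x80011000 sits inside a 64K contiguous run, so the run is split
	 * into 4K PTEs before the 8K window is marked read-only. */
	return ivpu_mmu_context_set_pages_ro(vdev, ctx, 0x80011000ull, SZ_8K);
}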
+
static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
while (size) {
@@ -330,8 +430,9 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only)
{
+ u64 start_vpu_addr = vpu_addr;
struct scatterlist *sg;
int ret;
u64 prot;
@@ -349,6 +450,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
prot = IVPU_MMU_ENTRY_MAPPED;
if (llc_coherent)
prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
+ if (read_only)
+ prot |= IVPU_MMU_ENTRY_FLAG_RO;
mutex_lock(&ctx->lock);
@@ -362,20 +465,36 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
- mutex_unlock(&ctx->lock);
- return ret;
+ goto err_unmap_pages;
}
vpu_addr += size;
}
+ if (!ctx->is_cd_valid) {
+ ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
+ ctx->id, ret);
+ goto err_unmap_pages;
+ }
+ ctx->is_cd_valid = true;
+ }
+
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
- mutex_unlock(&ctx->lock);
-
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ goto err_unmap_pages;
+ }
+
+ mutex_unlock(&ctx->lock);
+ return 0;
+
+err_unmap_pages:
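+ /* Unwind only the range this call mapped (start_vpu_addr up to the failure
+ * point); mappings created by earlier calls remain intact. */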
+ ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -410,7 +529,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
- ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n",
+ ctx->id, ret);
}
int
@@ -444,109 +564,77 @@ ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *n
mutex_unlock(&ctx->lock);
}
-static int
-ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
+void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
u64 start, end;
- int ret;
mutex_init(&ctx->lock);
- ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
- return ret;
- }
-
if (!context_id) {
- start = vdev->hw->ranges.global.start;
+ start = vdev->hw->ranges.runtime.start;
end = vdev->hw->ranges.shave.end;
} else {
- start = vdev->hw->ranges.user.start;
- end = vdev->hw->ranges.dma.end;
+ start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
+ end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
}
drm_mm_init(&ctx->mm, start, end - start);
ctx->id = context_id;
-
- return 0;
}
-static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
- if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
- return;
+ if (ctx->is_cd_valid) {
+ ivpu_mmu_cd_clear(vdev, ctx->id);
+ ctx->is_cd_valid = false;
+ }
mutex_destroy(&ctx->lock);
ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
drm_mm_takedown(&ctx->mm);
-
- ctx->pgtable.pgd_dma_ptr = NULL;
- ctx->pgtable.pgd_dma = 0;
}
-int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
+void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
- return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
- return ivpu_mmu_context_fini(vdev, &vdev->gctx);
+ ivpu_mmu_context_fini(vdev, &vdev->gctx);
}
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
- return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
-}
-
-void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
-{
- return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
-}
-
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
-{
- struct ivpu_file_priv *file_priv;
-
- xa_lock(&vdev->context_xa);
-
- file_priv = xa_load(&vdev->context_xa, ssid);
- if (file_priv)
- file_priv->has_mmu_faults = true;
-
- xa_unlock(&vdev->context_xa);
-}
-
-int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
-{
int ret;
- drm_WARN_ON(&vdev->drm, !ctx_id);
+ ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
- ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
- return ret;
+ mutex_lock(&vdev->rctx.lock);
+
+ if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
+ ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
+ ret = -ENOMEM;
+ goto err_ctx_fini;
}
- ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
+ ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
if (ret) {
- ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
- goto err_context_fini;
+ ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
+ goto err_ctx_fini;
}
- return 0;
+ mutex_unlock(&vdev->rctx.lock);
+ return ret;
-err_context_fini:
- ivpu_mmu_context_fini(vdev, ctx);
+err_ctx_fini:
+ mutex_unlock(&vdev->rctx.lock);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
return ret;
}
-void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
- drm_WARN_ON(&vdev->drm, !ctx->id);
-
- ivpu_mmu_clear_pgtable(vdev, ctx->id);
- ivpu_mmu_context_fini(vdev, ctx);
+ ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 535db3a1fc74..663a11a9db11 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -23,28 +23,29 @@ struct ivpu_mmu_pgtable {
};
struct ivpu_mmu_context {
- struct mutex lock; /* Protects: mm, pgtable */
+ struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */
struct drm_mm mm;
struct ivpu_mmu_pgtable pgtable;
+ bool is_cd_valid;
u32 id;
};
-int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
+void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id);
+void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
-int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
-void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt);
+int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, size_t size);
#endif /* __IVPU_MMU_CONTEXT_H__ */
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
new file mode 100644
index 000000000000..1d9c1cb17924
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+#include <linux/pm_runtime.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_ms.h"
+#include "ivpu_pm.h"
+
+#define MS_INFO_BUFFER_SIZE SZ_64K
+#define MS_NUM_BUFFERS 2
+#define MS_READ_PERIOD_MULTIPLIER 2
+#define MS_MIN_SAMPLE_PERIOD_NS 1000000
+
+static struct ivpu_ms_instance *
+get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask)
+{
+ struct ivpu_ms_instance *ms;
+
+ lockdep_assert_held(&file_priv->ms_lock);
+
+ list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node)
+ if (ms->mask == metric_mask)
+ return ms;
+
+ return NULL;
+}
+
+int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_metric_streamer_start *args = data;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_ms_instance *ms;
+ u32 sample_size;
+ u64 buf_size;
+ int ret;
+
+ if (!args->metric_group_mask || !args->read_period_samples ||
+ args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
+ return -EINVAL;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->ms_lock);
+
+ if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
+ ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n",
+ args->metric_group_mask);
+ ret = -EALREADY;
+ goto unlock;
+ }
+
+ ms = kzalloc(sizeof(*ms), GFP_KERNEL);
+ if (!ms) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ms->mask = args->metric_group_mask;
+
+ ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL);
+ if (ret)
+ goto err_free_ms;
+
+ buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size *
+ MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS);
+ if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) {
+ ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n",
+ buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global));
+ ret = -EINVAL;
+ goto err_free_ms;
+ }
+
+ ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
+ if (!ms->bo) {
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size);
+ ret = -ENOMEM;
+ goto err_free_ms;
+ }
+
+ ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS;
+ ms->active_buff_vpu_addr = ms->bo->vpu_addr;
+ ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size;
+ ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo);
+ ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size;
+
+ ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns,
+ ms->active_buff_vpu_addr, ms->buff_size);
+ if (ret)
+ goto err_free_bo;
+
+ args->sample_size = sample_size;
+ args->max_data_size = ivpu_bo_size(ms->bo);
+ list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list);
+ goto unlock;
+
+err_free_bo:
+ ivpu_bo_free(ms->bo);
+err_free_ms:
+ kfree(ms);
+unlock:
+ mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
+ return ret;
+}
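buf_size above is read_period_samples × sample_size, doubled once for the read-period headroom and once for double buffering, then page-aligned; a standalone check with made-up numbers (4K pages assumed):

#include <stdint.h>
#include <stdio.h>

#define MS_READ_PERIOD_MULTIPLIER 2
#define MS_NUM_BUFFERS 2
#define PAGE_ALIGN(x) (((x) + 0xfffULL) & ~0xfffULL) /* 4K pages assumed */

int main(void)
{
	uint64_t read_period_samples = 100, sample_size = 600; /* made up */
	uint64_t buf_size = PAGE_ALIGN(read_period_samples * sample_size *
				       MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS);

	/* 100 * 600 * 2 * 2 = 240000, aligned up to 241664 (59 pages) */
	printf("buf_size = %llu\n", (unsigned long long)buf_size);
	return 0;
}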
+
+static int
+copy_leftover_bytes(struct ivpu_ms_instance *ms,
+ void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
+{
+ u64 copy_bytes;
+
+ if (ms->leftover_bytes) {
+ copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes);
+ if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes))
+ return -EFAULT;
+
+ ms->leftover_bytes -= copy_bytes;
+ ms->leftover_addr += copy_bytes;
+ *user_bytes_copied += copy_bytes;
+ }
+
+ return 0;
+}
+
+static int
+copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms,
+ void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
+{
+ u64 bytes_written;
+ int ret;
+
+ *user_bytes_copied = 0;
+
+ ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
+ if (ret)
+ return ret;
+
+ if (*user_bytes_copied == user_size)
+ return 0;
+
+ ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr,
+ ms->buff_size, &bytes_written);
+ if (ret)
+ return ret;
+
+ swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr);
+ swap(ms->active_buff_ptr, ms->inactive_buff_ptr);
+
+ ms->leftover_bytes = bytes_written;
+ ms->leftover_addr = ms->inactive_buff_ptr;
+
+ return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
+}
+
+int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_metric_streamer_get_data *args = data;
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_ms_instance *ms;
+ u64 bytes_written;
+ int ret;
+
+ if (!args->metric_group_mask)
+ return -EINVAL;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->ms_lock);
+
+ ms = get_instance_by_mask(file_priv, args->metric_group_mask);
+ if (!ms) {
+ ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n",
+ args->metric_group_mask);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!args->buffer_size) {
+ ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written);
+ if (ret)
+ goto unlock;
+ args->data_size = bytes_written + ms->leftover_bytes;
+ goto unlock;
+ }
+
+ if (!args->buffer_ptr) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr),
+ args->buffer_size, &args->data_size);
+unlock:
+ mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
+ return ret;
+}
+
+static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms)
+{
+ lockdep_assert_held(&file_priv->ms_lock);
+
+ list_del(&ms->ms_instance_node);
+ ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask);
+ ivpu_bo_free(ms->bo);
+ kfree(ms);
+}
+
+int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_metric_streamer_stop *args = data;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_ms_instance *ms;
+ int ret;
+
+ if (!args->metric_group_mask)
+ return -EINVAL;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&file_priv->ms_lock);
+
+ ms = get_instance_by_mask(file_priv, args->metric_group_mask);
+ if (ms)
+ free_instance(file_priv, ms);
+
+ mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
+ return ms ? 0 : -EINVAL;
+}
+
+static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv)
+{
+ lockdep_assert_held(&file_priv->ms_lock);
+
+ if (file_priv->ms_info_bo)
+ return file_priv->ms_info_bo;
+
+ file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
+ return file_priv->ms_info_bo;
+}
+
+int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_metric_streamer_get_data *args = data;
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_bo *bo;
+ u64 info_size;
+ int ret;
+
+ if (!args->metric_group_mask)
+ return -EINVAL;
+
+ if (!args->buffer_size)
+ return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask,
+ 0, 0, NULL, &args->data_size);
+ if (!args->buffer_ptr)
+ return -EINVAL;
+
+ mutex_lock(&file_priv->ms_lock);
+
+ bo = get_ms_info_bo(file_priv);
+ if (!bo) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr,
+ ivpu_bo_size(bo), NULL, &info_size);
+ if (ret)
+ goto unlock;
+
+ if (args->buffer_size < info_size) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size))
+ ret = -EFAULT;
+
+ args->data_size = info_size;
+unlock:
+ mutex_unlock(&file_priv->ms_lock);
+
+ return ret;
+}
+
+void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
+{
+ struct ivpu_ms_instance *ms, *tmp;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ pm_runtime_get_sync(vdev->drm.dev);
+
+ mutex_lock(&file_priv->ms_lock);
+
+ if (file_priv->ms_info_bo) {
+ ivpu_bo_free(file_priv->ms_info_bo);
+ file_priv->ms_info_bo = NULL;
+ }
+
+ list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node)
+ free_instance(file_priv, ms);
+
+ mutex_unlock(&file_priv->ms_lock);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
+
+void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
+{
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+
+ mutex_lock(&vdev->context_list_lock);
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+ ivpu_ms_cleanup(file_priv);
+
+ mutex_unlock(&vdev->context_list_lock);
+}
diff --git a/drivers/accel/ivpu/ivpu_ms.h b/drivers/accel/ivpu/ivpu_ms.h
new file mode 100644
index 000000000000..fbd5ebebc3d9
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_ms.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+#ifndef __IVPU_MS_H__
+#define __IVPU_MS_H__
+
+#include <linux/list.h>
+
+struct drm_device;
+struct drm_file;
+struct ivpu_bo;
+struct ivpu_device;
+struct ivpu_file_priv;
+
+struct ivpu_ms_instance {
+ struct ivpu_bo *bo;
+ struct list_head ms_instance_node;
+ u64 mask;
+ u64 buff_size;
+ u64 active_buff_vpu_addr;
+ u64 inactive_buff_vpu_addr;
+ void *active_buff_ptr;
+ void *inactive_buff_ptr;
+ u64 leftover_bytes;
+ void *leftover_addr;
+};
+
+int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv);
+void ivpu_ms_cleanup_all(struct ivpu_device *vdev);
+
+#endif /* __IVPU_MS_H__ */
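From userspace, the start ioctl fills in sample_size and max_data_size on return; a hedged sketch (the ioctl request macro name is an assumption; the struct fields match this file's usage):

#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Hypothetical userspace caller; error handling elided. */
static int ms_start_sketch(int fd, __u64 metric_group_mask)
{
	struct drm_ivpu_metric_streamer_start args = {
		.metric_group_mask = metric_group_mask,
		.sampling_period_ns = 10000000, /* 10 ms, above MS_MIN_SAMPLE_PERIOD_NS */
		.read_period_samples = 100,
	};

	return ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_START, &args);
}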
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 4f5ea466731f..480c075d87f6 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -9,25 +9,34 @@
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
#include "ivpu_drv.h"
-#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
+#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
+#include "ivpu_ms.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
+#include "vpu_boot_api.h"
static bool ivpu_disable_recovery;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
+#endif
static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+static unsigned long ivpu_inference_timeout_ms;
+module_param_named(inference_timeout_ms, ivpu_inference_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(inference_timeout_ms, "Inference maximum duration, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
@@ -36,14 +45,16 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
ivpu_cmdq_reset_all_contexts(vdev);
ivpu_ipc_reset(vdev);
+ ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
+ fw->last_heartbeat = 0;
}
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
+ struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp);
if (!bp->save_restore_ret_address) {
ivpu_pm_prepare_cold_boot(vdev);
@@ -72,8 +83,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
int ret;
retry:
- pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -109,39 +120,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_fw_log_dump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
+ ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -153,16 +182,11 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
return;
}
- if (ivpu_is_fpga(vdev)) {
- ivpu_err(vdev, "Recovery not available on FPGA\n");
- return;
- }
-
/* Trigger recovery if it's not in progress */
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_diagnose_failure(vdev);
ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
- queue_work(system_long_wq, &vdev->pm->recovery_work);
+ queue_work(system_dfl_wq, &vdev->pm->recovery_work);
}
}
@@ -170,7 +194,30 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ unsigned long inference_timeout_ms = ivpu_inference_timeout_ms ? ivpu_inference_timeout_ms :
+ vdev->timeout.inference;
+ u64 inference_max_retries;
+ u64 heartbeat;
+
+ if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n");
+ goto recovery;
+ }
+ inference_max_retries = DIV_ROUND_UP(inference_timeout_ms, timeout_ms);
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) >= inference_max_retries) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit (%lld) exceeded\n",
+ inference_max_retries);
+ goto recovery;
+ }
+
+ vdev->fw->last_heartbeat = heartbeat;
+ ivpu_start_job_timeout_detection(vdev);
+ return;
+
+recovery:
+ atomic_set(&vdev->job_timeout_counter, 0);
ivpu_pm_trigger_recovery(vdev, "TDR");
}
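
For intuition, a minimal userspace sketch of the retry arithmetic above: the TDR period sets how often the heartbeat is re-checked, and the inference timeout caps the total number of re-checks before recovery fires. Both timeout values below are illustrative, not the driver's defaults.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long tdr_timeout_ms = 2000;        /* per-check TDR period */
	unsigned long inference_timeout_ms = 60000; /* whole-inference cap */
	unsigned long long max_retries;

	max_retries = DIV_ROUND_UP(inference_timeout_ms, tdr_timeout_ms);
	/* job_timeout_counter may reach this value before recovery triggers */
	printf("heartbeat re-checks before recovery: %llu\n", max_retries); /* 30 */
	return 0;
}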
@@ -179,12 +226,14 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
/* No-op if already queued */
- queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
+ queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work,
+ msecs_to_jiffies(timeout_ms));
}
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+ atomic_set(&vdev->job_timeout_counter, 0);
}
int ivpu_pm_suspend_cb(struct device *dev)
@@ -193,6 +242,7 @@ int ivpu_pm_suspend_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
unsigned long timeout;
+ trace_pm("suspend");
ivpu_dbg(vdev, PM, "Suspend..\n");
timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
@@ -210,6 +260,7 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_pm_prepare_warm_boot(vdev);
ivpu_dbg(vdev, PM, "Suspend done.\n");
+ trace_pm("suspend done");
return 0;
}
@@ -220,6 +271,7 @@ int ivpu_pm_resume_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
+ trace_pm("resume");
ivpu_dbg(vdev, PM, "Resume..\n");
ret = ivpu_resume(vdev);
@@ -227,6 +279,7 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
ivpu_dbg(vdev, PM, "Resume done.\n");
+ trace_pm("resume done");
return ret;
}
@@ -235,42 +288,40 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
- bool hw_is_idle = true;
- int ret;
+ int ret, ret_d0i3;
+ bool is_idle;
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+ trace_pm("runtime suspend");
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
- if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
- ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
- vdev->pm->suspend_reschedule_counter);
- pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
- vdev->pm->suspend_reschedule_counter--;
- return -EAGAIN;
- }
+ ivpu_mmu_disable(vdev);
- if (!vdev->pm->suspend_reschedule_counter)
- hw_is_idle = false;
- else if (ivpu_jsm_pwr_d0i3_enter(vdev))
- hw_is_idle = false;
+ is_idle = ivpu_hw_is_idle(vdev) || vdev->pm->dct_active_percent;
+ if (!is_idle)
+ ivpu_err(vdev, "NPU is not idle before autosuspend\n");
+
+ ret_d0i3 = ivpu_jsm_pwr_d0i3_enter(vdev);
+ if (ret_d0i3)
+ ivpu_err(vdev, "Failed to prepare for d0i3: %d\n", ret_d0i3);
ret = ivpu_suspend(vdev);
if (ret)
ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);
- if (!hw_is_idle) {
- ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
- ivpu_fw_log_dump(vdev);
+ if (!is_idle || ret_d0i3) {
+ ivpu_err(vdev, "Forcing cold boot due to previous errors\n");
+ atomic_inc(&vdev->pm->reset_counter);
+ ivpu_dev_coredump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
}
- vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
-
ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
+ trace_pm("runtime suspend done");
return 0;
}
@@ -281,6 +332,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
+ trace_pm("runtime resume");
ivpu_dbg(vdev, PM, "Runtime resume..\n");
ret = ivpu_resume(vdev);
@@ -288,6 +340,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
ivpu_dbg(vdev, PM, "Runtime resume done.\n");
+ trace_pm("runtime resume done");
return ret;
}
@@ -297,25 +350,16 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (!drm_WARN_ON(&vdev->drm, ret < 0))
- vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
-
- return ret;
-}
-
-int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = pm_runtime_get_if_in_use(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
void ivpu_rpm_put(struct ivpu_device *vdev)
{
- pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -324,33 +368,26 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
+
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
@@ -360,7 +397,6 @@ void ivpu_pm_init(struct ivpu_device *vdev)
int delay;
pm->vdev = vdev;
- pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
init_rwsem(&pm->reset_lock);
atomic_set(&pm->reset_pending, 0);
@@ -376,23 +412,22 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ pm_runtime_set_active(dev);
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
{
drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
- cancel_work_sync(&vdev->pm->recovery_work);
+ disable_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev)
{
struct device *dev = vdev->drm.dev;
- pm_runtime_set_active(dev);
pm_runtime_allow(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -401,3 +436,76 @@ void ivpu_pm_disable(struct ivpu_device *vdev)
pm_runtime_get_noresume(vdev->drm.dev);
pm_runtime_forbid(vdev->drm.dev);
}
+
+int ivpu_pm_dct_init(struct ivpu_device *vdev)
+{
+ if (vdev->pm->dct_active_percent)
+ return ivpu_pm_dct_enable(vdev, vdev->pm->dct_active_percent);
+
+ return 0;
+}
+
+int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
+{
+ u32 active_us, inactive_us;
+ int ret;
+
+ if (active_percent == 0 || active_percent > 100)
+ return -EINVAL;
+
+ active_us = (DCT_PERIOD_US * active_percent) / 100;
+ inactive_us = DCT_PERIOD_US - active_us;
+
+ vdev->pm->dct_active_percent = active_percent;
+
+ ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
+ active_percent, active_us, inactive_us);
+
+ ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
+ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
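
A minimal sketch of the duty-cycle split computed above. The period value here is an assumption for illustration only; the real DCT_PERIOD_US define lives elsewhere in the driver.

#include <stdio.h>

int main(void)
{
	const unsigned int dct_period_us = 35000; /* assumed period, for illustration */
	const unsigned char active_percent = 30;  /* requested D0 share */
	unsigned int active_us = (dct_period_us * active_percent) / 100;
	unsigned int inactive_us = dct_period_us - active_us;

	/* Mirrors the ivpu_dbg() message format above */
	printf("DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
	       active_percent, active_us, inactive_us);
	return 0;
}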
+
+int ivpu_pm_dct_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ vdev->pm->dct_active_percent = 0;
+
+ ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
+
+ ret = ivpu_jsm_dct_disable(vdev);
+ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
+{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
+ bool enable;
+ int ret;
+
+ if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
+ return;
+
+ if (enable)
+ ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
+ else
+ ret = ivpu_pm_dct_disable(vdev);
+
+ if (!ret) {
+ /* Convert percent to U1.7 format */
+ u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100);
+
+ ivpu_hw_btrs_dct_set_status(vdev, enable, val);
+ }
+}
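
A standalone sketch of the percent-to-U1.7 conversion used in the work function above: 1.0 is encoded as 128, so 100% maps to 0x80 and 50% to 0x40.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int pct;

	for (pct = 25; pct <= 100; pct += 25)
		printf("%3u%% -> %#04x\n", pct, DIV_ROUND_CLOSEST(pct * 128, 100));
	/* prints 0x20, 0x40, 0x60, 0x80 */
	return 0;
}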
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index ec60fbeefefc..a2aa7a27f32e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_PM_H__
@@ -19,13 +19,13 @@ struct ivpu_pm_info {
atomic_t reset_counter;
atomic_t reset_pending;
bool is_warmboot;
- u32 suspend_reschedule_counter;
+ u8 dct_active_percent;
};
void ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev);
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev);
@@ -36,11 +36,15 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev);
void ivpu_pm_reset_done_cb(struct pci_dev *pdev);
int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
-int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
+int ivpu_pm_dct_init(struct ivpu_device *vdev);
+int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
+int ivpu_pm_dct_disable(struct ivpu_device *vdev);
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
+
#endif /* __IVPU_PM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
new file mode 100644
index 000000000000..d250a10caca9
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/units.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw.h"
+#include "ivpu_sysfs.h"
+
+/**
+ * DOC: npu_busy_time_us
+ *
+ * npu_busy_time_us is the time that the device spent executing jobs.
+ * The time is counted only while there are jobs submitted to the firmware.
+ *
+ * This time can be used to measure the utilization of the NPU, either by calculating
+ * the npu_busy_time_us difference between two timepoints (i.e. measuring the time
+ * that the NPU was active during some workload) or by monitoring the utilization
+ * percentage through periodic reads of npu_busy_time_us.
+ *
+ * When reading the value periodically, it shouldn't be read too often as that may
+ * impact job submission performance. The recommended period is 1 second.
+ */
+static ssize_t
+npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ ktime_t total, now = 0;
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+
+ total = vdev->busy_time;
+ if (!xa_empty(&vdev->submitted_jobs_xa))
+ now = ktime_sub(ktime_get(), vdev->busy_start_ts);
+ mutex_unlock(&vdev->submitted_jobs_lock);
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
+}
+
+static DEVICE_ATTR_RO(npu_busy_time_us);
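
A hedged userspace sketch of the sampling pattern the DOC above describes: read npu_busy_time_us twice, one second apart, and derive a utilization percentage. The sysfs path is hypothetical; substitute the NPU's actual PCI address.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical sysfs path; substitute the NPU's actual PCI device. */
#define BUSY_ATTR "/sys/bus/pci/devices/0000:00:0b.0/npu_busy_time_us"

static long long read_busy_us(void)
{
	long long v = -1;
	FILE *f = fopen(BUSY_ATTR, "r");

	if (f) {
		if (fscanf(f, "%lld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long long a = read_busy_us();

	sleep(1); /* the recommended 1 second period from the DOC above */
	long long b = read_busy_us();
	if (a >= 0 && b >= 0) /* busy microseconds over a ~1s window -> percent */
		printf("NPU utilization: ~%lld%%\n", (b - a) / 10000);
	return 0;
}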
+
+/**
+ * DOC: npu_memory_utilization
+ *
+ * The npu_memory_utilization attribute reports the current NPU memory utilization,
+ * in bytes.
+ */
+static ssize_t
+npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ struct ivpu_bo *bo;
+ u64 total_npu_memory = 0;
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
+ if (ivpu_bo_is_resident(bo))
+ total_npu_memory += ivpu_bo_size(bo);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ return sysfs_emit(buf, "%lld\n", total_npu_memory);
+}
+
+static DEVICE_ATTR_RO(npu_memory_utilization);
+
+/**
+ * DOC: sched_mode
+ *
+ * The sched_mode attribute reports the current NPU scheduling mode.
+ *
+ * It returns one of the following strings:
+ * - "HW" - Hardware Scheduler mode
+ * - "OS" - Operating System Scheduler mode
+ */
+static ssize_t
+sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+
+ return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? "HW" : "OS");
+}
+
+static DEVICE_ATTR_RO(sched_mode);
+
+/**
+ * DOC: npu_max_frequency_mhz
+ *
+ * The npu_max_frequency_mhz attribute shows the maximum frequency, in MHz, of the
+ * NPU's data processing unit.
+ */
+static ssize_t
+npu_max_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = ivpu_hw_dpu_max_freq_get(vdev);
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_max_frequency_mhz);
+
+/**
+ * DOC: npu_current_frequency_mhz
+ *
+ * The npu_current_frequency_mhz attribute shows the current frequency, in MHz, of
+ * the NPU's data processing unit.
+ */
+static ssize_t
+npu_current_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = 0;
+
+ /* Read frequency only if device is active, otherwise frequency is 0 */
+ if (pm_runtime_get_if_active(vdev->drm.dev) > 0) {
+ freq = ivpu_hw_dpu_freq_get(vdev);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+ }
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_current_frequency_mhz);
+
+static struct attribute *ivpu_dev_attrs[] = {
+ &dev_attr_npu_busy_time_us.attr,
+ &dev_attr_npu_memory_utilization.attr,
+ &dev_attr_sched_mode.attr,
+ &dev_attr_npu_max_frequency_mhz.attr,
+ &dev_attr_npu_current_frequency_mhz.attr,
+ NULL,
+};
+
+static struct attribute_group ivpu_dev_attr_group = {
+ .attrs = ivpu_dev_attrs,
+};
+
+void ivpu_sysfs_init(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = devm_device_add_group(vdev->drm.dev, &ivpu_dev_attr_group);
+ if (ret)
+ ivpu_warn(vdev, "Failed to add group to device, ret %d", ret);
+}
diff --git a/drivers/accel/ivpu/ivpu_sysfs.h b/drivers/accel/ivpu/ivpu_sysfs.h
new file mode 100644
index 000000000000..9836f09b35a3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_sysfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#ifndef __IVPU_SYSFS_H__
+#define __IVPU_SYSFS_H__
+
+#include "ivpu_drv.h"
+
+void ivpu_sysfs_init(struct ivpu_device *vdev);
+
+#endif /* __IVPU_SYSFS_H__ */
diff --git a/drivers/accel/ivpu/ivpu_trace.h b/drivers/accel/ivpu/ivpu_trace.h
new file mode 100644
index 000000000000..eb792038e701
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_trace.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#if !defined(__IVPU_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __IVPU_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include "ivpu_drv.h"
+#include "ivpu_job.h"
+#include "vpu_jsm_api.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_ipc.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vpu
+#define TRACE_INCLUDE_FILE ivpu_trace
+
+TRACE_EVENT(pm,
+ TP_PROTO(const char *event),
+ TP_ARGS(event),
+ TP_STRUCT__entry(__field(const char *, event)),
+ TP_fast_assign(__entry->event = event;),
+ TP_printk("%s", __entry->event)
+);
+
+TRACE_EVENT(job,
+ TP_PROTO(const char *event, struct ivpu_job *job),
+ TP_ARGS(event, job),
+ TP_STRUCT__entry(__field(const char *, event)
+ __field(u32, ctx_id)
+ __field(u32, engine_id)
+ __field(u32, job_id)
+ ),
+ TP_fast_assign(__entry->event = event;
+ __entry->ctx_id = job->file_priv->ctx.id;
+ __entry->engine_id = job->engine_idx;
+ __entry->job_id = job->job_id;),
+ TP_printk("%s context:%d engine:%d job:%d",
+ __entry->event,
+ __entry->ctx_id,
+ __entry->engine_id,
+ __entry->job_id)
+);
+
+TRACE_EVENT(jsm,
+ TP_PROTO(const char *event, struct vpu_jsm_msg *msg),
+ TP_ARGS(event, msg),
+ TP_STRUCT__entry(__field(const char *, event)
+ __field(const char *, type)
+ __field(enum vpu_ipc_msg_status, status)
+ __field(u32, request_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(__entry->event = event;
+ __entry->type = ivpu_jsm_msg_type_to_str(msg->type);
+ __entry->status = msg->status;
+ __entry->request_id = msg->request_id;
+ __entry->result = msg->result;),
+ TP_printk("%s type:%s, status:%#x, id:%#x, result:%#x",
+ __entry->event,
+ __entry->type,
+ __entry->status,
+ __entry->request_id,
+ __entry->result)
+);
+
+#endif /* __IVPU_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/accel/ivpu/ivpu_trace_points.c b/drivers/accel/ivpu/ivpu_trace_points.c
new file mode 100644
index 000000000000..f8fb99de0de3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_trace_points.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "ivpu_trace.h"
+#endif
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 87cac7bc730a..218468bbbcad 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2023, Intel Corporation.
+ * Copyright (c) 2020-2024, Intel Corporation.
*/
#ifndef VPU_BOOT_API_H
#define VPU_BOOT_API_H
/*
- * =========== FW API version information beginning ================
- * The bellow values will be used to construct the version info this way:
+ * The below values will be used to construct the version info this way:
* fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
* VPU_BOOT_API_VER_MINOR;
* VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes
@@ -27,19 +26,18 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 22
+#define VPU_BOOT_API_VER_MINOR 28
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 0
+#define VPU_BOOT_API_VER_PATCH 3
/*
* Index in the API version table
* Must be unique for each API
*/
#define VPU_BOOT_API_VER_INDEX 0
-/* ------------ FW API version information end ---------------------*/
#pragma pack(push, 4)
@@ -78,8 +76,20 @@ struct vpu_firmware_header {
* submission queue size and device capabilities.
*/
u32 preemption_buffer_2_size;
+ /*
+ * Maximum preemption buffer size that the FW can use: no need for the host
+ * driver to allocate more space than that specified by these fields.
+ * A value of 0 means no declared limit.
+ */
+ u32 preemption_buffer_1_max_size;
+ u32 preemption_buffer_2_max_size;
/* Space reserved for future preemption-related fields. */
- u32 preemption_reserved[6];
+ u32 preemption_reserved[4];
+ /* FW image read only section start address, 4KB aligned */
+ u64 ro_section_start_address;
+ /* FW image read only section size, 4KB aligned */
+ u32 ro_section_size;
+ u32 reserved;
};
/*
@@ -131,7 +141,7 @@ enum vpu_trace_destination {
/*
* Processor bit shifts (for loggable HW components).
*/
-#define VPU_TRACE_PROC_BIT_ARM 0
+#define VPU_TRACE_PROC_BIT_RESERVED 0
#define VPU_TRACE_PROC_BIT_LRT 1
#define VPU_TRACE_PROC_BIT_LNN 2
#define VPU_TRACE_PROC_BIT_SHV_0 3
@@ -159,8 +169,6 @@ enum vpu_trace_destination {
/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
-#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST
-#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST
struct vpu_boot_l2_cache_config {
u8 use;
@@ -194,6 +202,17 @@ struct vpu_warm_boot_section {
*/
#define POWER_PROFILE_SURVIVABILITY 0x1
+/**
+ * Enum for dvfs_mode boot param.
+ */
+enum vpu_governor {
+ VPU_GOV_DEFAULT = 0, /* Default Governor for the system */
+ VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */
+ VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */
+ VPU_GOV_POWER_SAVE = 3, /* Power save governor */
+ VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */
+};
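
A hedged standalone sketch of selecting one of the governors above for the dvfs_mode boot parameter. The stub struct stands in for struct vpu_boot_params, which carries many more fields; how the KMD actually populates it is driver plumbing not shown here.

#include <stdio.h>

enum vpu_governor {
	VPU_GOV_DEFAULT = 0,
	VPU_GOV_MAX_PERFORMANCE = 1,
	VPU_GOV_ON_DEMAND = 2,
	VPU_GOV_POWER_SAVE = 3,
	VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4
};

struct boot_params_stub {	/* trimmed stand-in for vpu_boot_params */
	unsigned int dvfs_mode;
};

int main(void)
{
	struct boot_params_stub bp = { .dvfs_mode = VPU_GOV_ON_DEMAND };

	printf("dvfs_mode = %u\n", bp.dvfs_mode); /* 2: On Demand */
	return 0;
}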
+
struct vpu_boot_params {
u32 magic;
u32 vpu_id;
@@ -296,7 +315,14 @@ struct vpu_boot_params {
u32 temp_sensor_period_ms;
/** PLL ratio for efficient clock frequency */
u32 pn_freq_pll_ratio;
- /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
+ /**
+ * DVFS Mode:
+ * 0 - Default, DVFS mode selected by the firmware
+ * 1 - Max Performance
+ * 2 - On Demand
+ * 3 - Power Save
+ * 4 - On Demand Priority Aware
+ */
u32 dvfs_mode;
/**
* Depending on DVFS Mode:
@@ -327,13 +353,20 @@ struct vpu_boot_params {
u64 d0i3_entry_vpu_ts;
/*
* The system time of the host operating system in microseconds.
- * E.g the number of microseconds since 1st of January 1970, or whatever date the
- * host operating system uses to maintain system time.
+	 * E.g. the number of microseconds since 1st of January 1970, or whatever
+ * date the host operating system uses to maintain system time.
* This value will be used to track system time on the VPU.
* The KMD is required to update this value on every VPU reset.
*/
u64 system_time_us;
- u32 pad4[18];
+ u32 pad4[2];
+ /*
+ * The delta between device monotonic time and the current value of the
+ * HW timestamp register, in ticks. Written by the firmware during boot.
+ * Can be used by the KMD to calculate device time.
+ */
+ u64 device_time_delta_ticks;
+ u32 pad7[14];
/* Warm boot information: 0x400 - 0x43F */
u32 warm_boot_sections_count;
u32 warm_boot_start_address_reference;
@@ -370,10 +403,7 @@ struct vpu_boot_params {
u32 pad6[734];
};
-/*
- * Magic numbers set between host and vpu to detect corruptio of tracing init
- */
-
+/* Magic numbers set between host and vpu to detect corruption of tracing init */
#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
/* Tracing buffer message format definitions */
@@ -393,7 +423,9 @@ struct vpu_tracing_buffer_header {
u32 host_canary_start;
/* offset from start of buffer for trace entries */
u32 read_index;
- u32 pad_to_cache_line_size_0[14];
+ /* keeps track of wrapping on the reader side */
+ u32 read_wrap_count;
+ u32 pad_to_cache_line_size_0[13];
/* End of first cache line */
/**
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index e46f3531211a..bca6a44dc041 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2023, Intel Corporation.
+ * Copyright (c) 2020-2025, Intel Corporation.
+ */
+
+/**
+ * @addtogroup Jsm
+ * @{
*/
/**
* @file
* @brief JSM shared definitions
- *
- * @ingroup Jsm
- * @brief JSM shared definitions
- * @{
*/
#ifndef VPU_JSM_API_H
#define VPU_JSM_API_H
@@ -22,12 +23,12 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 15
+#define VPU_JSM_API_VER_MINOR 33
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_JSM_API_VER_PATCH 6
+#define VPU_JSM_API_VER_PATCH 0
/*
* Index in the API version table
@@ -36,7 +37,7 @@
/*
* Number of Priority Bands for Hardware Scheduling
- * Bands: RealTime, Focus, Normal, Idle
+ * Bands: Idle(0), Normal(1), Focus(2), RealTime(3)
*/
#define VPU_HWS_NUM_PRIORITY_BANDS 4
@@ -53,8 +54,7 @@
* Engine indexes.
*/
#define VPU_ENGINE_COMPUTE 0
-#define VPU_ENGINE_COPY 1
-#define VPU_ENGINE_NB 2
+#define VPU_ENGINE_NB 1
/*
* VPU status values.
@@ -72,8 +72,15 @@
#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
-/* Job status returned when the job was preempted mid-inference */
+/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+/* Job status returned when the job was preempted mid-command */
+#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU
+/* Range of status codes that require engine reset */
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU
+#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
+#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU
/*
* Host <-> VPU IPC channels.
@@ -86,18 +93,70 @@
/*
* Job flags bit masks.
*/
-#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
-#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000
-
-/*
- * Sizes of the reserved areas in jobs, in bytes.
- */
-#define VPU_JOB_RESERVED_BYTES 8
+enum {
+ /*
+ * Null submission mask.
+ * When set, batch buffer's commands are not processed but returned as
+ * successful immediately, except fences and timestamps.
+ * When cleared, batch buffer's commands are processed normally.
+ * Used for testing and profiling purposes.
+ */
+ VPU_JOB_FLAGS_NULL_SUBMISSION_MASK = (1 << 0U),
+ /*
+ * Inline command mask.
+ * When set, the object in job queue is an inline command (see struct vpu_inline_cmd below).
+ * When cleared, the object in job queue is a job (see struct vpu_job_queue_entry below).
+ */
+ VPU_JOB_FLAGS_INLINE_CMD_MASK = (1 << 1U),
+ /*
+ * VPU private data mask.
+ * Reserved for the VPU to store private data about the job (or inline command)
+ * while being processed.
+ */
+ VPU_JOB_FLAGS_PRIVATE_DATA_MASK = 0xFFFF0000U
+};
/*
- * Sizes of the reserved areas in job queues, in bytes.
+ * Job queue flags bit masks.
*/
-#define VPU_JOB_QUEUE_RESERVED_BYTES 52
+enum {
+ /*
+ * No job done notification mask.
+ * When set, indicates that no job done notification should be sent for any
+ * job from this queue. When cleared, indicates that job done notification
+ * should be sent for every job completed from this queue.
+ */
+ VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK = (1 << 0U),
+ /*
+ * Native fence usage mask.
+ * When set, indicates that job queue uses native fences (as inline commands
+ * in job queue). Such queues may also use legacy fences (as commands in batch buffers).
+ * When cleared, indicates the job queue only uses legacy fences.
+ * NOTES:
+ * 1. For queues using native fences, VPU expects that all jobs in the queue
+ * are immediately followed by an inline command object. This object is expected
+ * to be a fence signal command in most cases, but can also be a NOP in case the host
+	 * does not need per-job fence signalling. Other inline command objects can be
+ * inserted between "job and inline command" pairs.
+ * 2. Native fence queues are only supported on VPU 40xx onwards.
+ */
+ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
+ /*
+ * Enable turbo mode for testing NPU performance; not recommended for regular usage.
+ */
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U),
+ /*
+ * Queue error detection mode flag
+ * For 'interactive' queues (this bit not set), the FW will identify queues that have not
+ * completed a job inside the TDR timeout as in error as part of engine reset sequence.
+ * For 'non-interactive' queues (this bit set), the FW will identify queues that have not
+ * progressed the heartbeat inside the non-interactive no-progress timeout as in error as
+ * part of engine reset sequence. Additionally, there is an upper limit applied to these
+ * queues: even if they progress the heartbeat, if they run longer than non-interactive
+ * timeout, then the FW will also identify them as in error.
+ */
+ VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U)
+};
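
A small sketch composing the flags above for a native-fence queue that keeps per-job completions and opts into heartbeat-based error detection; the enum values are copied from the header so the snippet compiles on its own.

#include <stdio.h>

enum {
	VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK      = (1 << 0U),
	VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
	VPU_JOB_QUEUE_FLAGS_TURBO_MODE            = (1 << 2U),
	VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE       = (1 << 3U)
};

int main(void)
{
	/* Native fences (VPU 40xx onwards) + non-interactive TDR handling;
	 * NO_JOB_DONE stays clear so every job still raises a completion. */
	unsigned int flags = VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK |
			     VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE;

	printf("queue flags = %#x\n", flags); /* 0xa */
	return 0;
}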
/*
* Max length (including trailing NULL char) of trace entity name (e.g., the
@@ -141,33 +200,140 @@
#define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL
/*
+ * Inline commands types.
+ */
+/*
+ * NOP.
+ * VPU does nothing other than consuming the inline command object.
+ */
+#define VPU_INLINE_CMD_TYPE_NOP 0x0
+/*
+ * Fence wait.
+ * VPU waits for the fence current value to reach monitored value.
+ * Fence wait operations are executed upon job dispatching. While waiting for
+ * the fence to be satisfied, VPU blocks fetching of the next objects in the queue.
+ * Jobs present in the queue prior to the fence wait object may be processed
+ * concurrently.
+ */
+#define VPU_INLINE_CMD_TYPE_FENCE_WAIT 0x1
+/*
+ * Fence signal.
+ * VPU sets the fence current value to the provided value. If new current value
+ * is equal to or higher than monitored value, VPU sends fence signalled notification
+ * to the host. Fence signal operations are executed upon completion of all the jobs
+ * present in the queue prior to them, and in-order relative to each other in the queue.
+ * But jobs in-between them may be processed concurrently and may complete out-of-order.
+ */
+#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
+
+/**
+ * Job scheduling priority bands for both hardware scheduling and OS scheduling.
+ */
+enum vpu_job_scheduling_priority_band {
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE = 0,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL = 1,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS = 2,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME = 3,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
+};
+
+/**
* Job format.
+ * A job defines the actual workload to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
- u32 job_id; /**< Job ID */
- u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
- u64 root_page_table_addr; /**< Address of root page table to use for this job */
- u64 root_page_table_update_counter; /**< Page tables update events counter */
+ /** Address of VPU commands batch buffer */
+ u64 batch_buf_addr;
+ /** Job ID */
+ u32 job_id;
+ /** Flags bit field, see VPU_JOB_FLAGS_* above */
+ u32 flags;
+ /**
+ * Doorbell ring timestamp taken by KMD from SoC's global system clock, in
+ * microseconds. NPU can convert this value to its own fixed clock's timebase,
+ * to match other profiling timestamps.
+ */
+ u64 doorbell_timestamp;
+ /** Extra id for job tracking, used only in the firmware perf traces */
+ u64 host_tracking_id;
+ /** Address of the primary preemption buffer to use for this job */
u64 primary_preempt_buf_addr;
- /**< Address of the primary preemption buffer to use for this job */
+ /** Size of the primary preemption buffer to use for this job */
u32 primary_preempt_buf_size;
- /**< Size of the primary preemption buffer to use for this job */
+ /** Size of secondary preemption buffer to use for this job */
u32 secondary_preempt_buf_size;
- /**< Size of secondary preemption buffer to use for this job */
+ /** Address of secondary preemption buffer to use for this job */
u64 secondary_preempt_buf_addr;
- /**< Address of secondary preemption buffer to use for this job */
- u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+ u64 reserved_0;
};
-/*
+/**
+ * Inline command format.
+ * Inline commands are the commands executed at scheduler level (typically,
+ * synchronization directives). Inline command and job objects must be of
+ * the same size and have flags field at same offset.
+ */
+struct vpu_inline_cmd {
+ u64 reserved_0;
+ /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ u32 type;
+ /** Flags bit field, see VPU_JOB_FLAGS_* above. */
+ u32 flags;
+ /** Inline command payload. Depends on inline command type. */
+ union payload {
+ /** Fence (wait and signal) commands' payload. */
+ struct fence {
+ /** Fence object handle. */
+ u64 fence_handle;
+ /** User VA of the current fence value. */
+ u64 current_value_va;
+ /** User VA of the monitored fence value (read-only). */
+ u64 monitored_value_va;
+ /** Value to wait for or write in fence location. */
+ u64 value;
+ /** User VA of the log buffer in which to add log entry on completion. */
+ u64 log_buffer_va;
+ /** NPU private data. */
+ u64 npu_private_data;
+ } fence;
+ /**
+ * Other commands do not have a payload:
+ * Payload definition for future inline commands can be inserted here.
+ */
+ u64 reserved_1[6];
+ } payload;
+};
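
A hedged sketch of filling in a fence-signal inline command. The structs here are trimmed standalone copies of the definitions above (the real payload is a union padded to the size of a job entry), and every handle and user VA is a made-up illustrative value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VPU_JOB_FLAGS_INLINE_CMD_MASK	 (1 << 1U)
#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2

struct fence_payload {			/* trimmed copy of the fence payload */
	uint64_t fence_handle;
	uint64_t current_value_va;
	uint64_t monitored_value_va;
	uint64_t value;
	uint64_t log_buffer_va;
	uint64_t npu_private_data;
};

struct inline_cmd {			/* trimmed copy of vpu_inline_cmd */
	uint64_t reserved_0;
	uint32_t type;
	uint32_t flags;
	struct fence_payload fence;
};

int main(void)
{
	struct inline_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = VPU_INLINE_CMD_TYPE_FENCE_SIGNAL;
	cmd.flags = VPU_JOB_FLAGS_INLINE_CMD_MASK;	/* marks the slot as inline */
	cmd.fence.fence_handle = 0x1;			/* illustrative handle */
	cmd.fence.current_value_va = 0x7f0000001000ULL;	/* hypothetical user VAs */
	cmd.fence.monitored_value_va = 0x7f0000001008ULL;
	cmd.fence.value = 42;		/* written once preceding jobs complete */

	printf("inline cmd: type %#x flags %#x\n", cmd.type, cmd.flags);
	return 0;
}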
+
+/**
+ * Job queue slots can be populated either with job objects or inline command objects.
+ */
+union vpu_jobq_slot {
+ struct vpu_job_queue_entry job;
+ struct vpu_inline_cmd inline_cmd;
+};
+
+/**
* Job queue control registers.
*/
struct vpu_job_queue_header {
u32 engine_idx;
u32 head;
u32 tail;
- u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+ u32 flags;
+ /** Set to 1 to indicate priority_band field is valid */
+ u32 priority_band_valid;
+ /**
+ * Priority for the work of this job queue, valid only if the HWS is NOT used
+ * and the @ref priority_band_valid is set to 1. It is applied only during
+ * the @ref VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the priority_band to optimize the power
+ * management logic, but it will not affect the order of jobs.
+ * Available priority bands: @see enum vpu_job_scheduling_priority_band
+ */
+ u32 priority_band;
+	/** Assigns a further priority inside the realtime band, limited to the 0..31 range */
+ u32 realtime_priority_level;
+ u32 reserved_0[9];
};
/*
@@ -175,7 +341,7 @@ struct vpu_job_queue_header {
*/
struct vpu_job_queue {
struct vpu_job_queue_header header;
- struct vpu_job_queue_entry job[];
+ union vpu_jobq_slot slot[];
};
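
A hedged sketch of the producer side of such a ring: fill slot[tail], publish the new tail, then ring the doorbell. ring_doorbell() is a hypothetical stand-in for the MMIO write, and the head/tail wrap-around is simplified to a modulo over the slot count.

#include <stdint.h>

struct jobq_header {		/* trimmed copy of vpu_job_queue_header */
	uint32_t engine_idx;
	uint32_t head;		/* consumer (FW) position */
	uint32_t tail;		/* producer (host) position */
};

static void ring_doorbell(uint32_t db_idx)
{
	(void)db_idx;		/* stand-in: the real thing is an MMIO write */
}

/* Returns 0 on success, -1 if the ring is full. */
static int submit_slot(struct jobq_header *hdr, uint32_t slot_count, uint32_t db_idx)
{
	uint32_t next = (hdr->tail + 1) % slot_count;

	if (next == hdr->head)
		return -1;	/* FW has not consumed enough slots yet */
	/* ... fill slot[hdr->tail] with a job or an inline command here ... */
	hdr->tail = next;	/* publish the slot */
	ring_doorbell(db_idx);	/* tell the FW there is new work */
	return 0;
}

int main(void)
{
	struct jobq_header hdr = { .engine_idx = 0, .head = 0, .tail = 0 };

	return submit_slot(&hdr, 64, 0) ? 1 : 0;	/* 64-slot queue, doorbell 0 */
}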
/**
@@ -190,18 +356,16 @@ enum vpu_trace_entity_type {
VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
};
-/*
+/**
* HWS specific log buffer header details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_header {
- /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ /** Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU overwrites the 0th entry;
- * initialised by host to 0.
- */
+ /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
- /*
+ /**
* This is the number of buffers that can be stored in the log buffer provided by the host.
* It is written by host before passing buffer to VPU. VPU should consider it read-only.
*/
@@ -209,14 +373,14 @@ struct vpu_hws_log_buffer_header {
u64 reserved[2];
};
-/*
+/**
* HWS specific log buffer entry details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_entry {
- /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
u64 vpu_timestamp;
- /*
+ /**
* Operation type:
* 0 - context state change
* 1 - queue new work
@@ -226,21 +390,111 @@ struct vpu_hws_log_buffer_entry {
*/
u32 operation_type;
u32 reserved;
- /* Operation data depends on operation type */
+ /** Operation data depends on operation type */
u64 operation_data[2];
};
+/* Native fence log buffer types. */
+enum vpu_hws_native_fence_log_type {
+ VPU_HWS_NATIVE_FENCE_LOG_TYPE_WAITS = 1,
+ VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
+};
+
+/** HWS native fence log buffer header. */
+struct vpu_hws_native_fence_log_header {
+ union {
+ struct {
+ /** Index of the first free entry in buffer. */
+ u32 first_free_entry_idx;
+ /**
+ * Incremented whenever the NPU wraps around the buffer and writes
+ * to the first entry again.
+ */
+ u32 wraparound_count;
+ };
+ /** Field allowing atomic update of both fields above. */
+ u64 atomic_wraparound_and_entry_idx;
+ };
+ /** Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ u64 type;
+ /** Allocated number of entries in the log buffer. */
+ u64 entry_nb;
+ u64 reserved[2];
+};
+
+/** Native fence log operation types. */
+enum vpu_hws_native_fence_log_op {
+ VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
+ VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
+};
+
+/** HWS native fence log entry. */
+struct vpu_hws_native_fence_log_entry {
+ /** Newly signaled/unblocked fence value. */
+ u64 fence_value;
+ /** Native fence object handle to which this operation belongs. */
+ u64 fence_handle;
+ /** Operation type, see enum vpu_hws_native_fence_log_op. */
+ u64 op_type;
+ u64 reserved_0;
+ /**
+ * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
+ * wait was started (in NPU SysTime).
+ */
+ u64 fence_wait_start_ts;
+ u64 reserved_1;
+ /** Timestamp at which fence operation was completed (in NPU SysTime). */
+ u64 fence_end_ts;
+};
+
+/** Native fence log buffer. */
+struct vpu_hws_native_fence_log_buffer {
+ struct vpu_hws_native_fence_log_header header;
+ struct vpu_hws_native_fence_log_entry entry[];
+};
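
A hedged sketch of a reader draining such a log, tracking its own index and wrap count against the header (the same idea as the read_wrap_count field added to the tracing buffer earlier in this patch). A real reader would snapshot atomic_wraparound_and_entry_idx with a single load; this simplified version also assumes the reader keeps up with the writer.

#include <stdint.h>
#include <stdio.h>

struct nf_log_header {		/* trimmed copy of the header above */
	uint32_t first_free_entry_idx;
	uint32_t wraparound_count;
	uint64_t type;
	uint64_t entry_nb;
};

static void drain(const struct nf_log_header *hdr, uint32_t *rd_idx, uint32_t *rd_wrap)
{
	while (*rd_wrap != hdr->wraparound_count ||
	       *rd_idx != hdr->first_free_entry_idx) {
		/* ... consume entry[*rd_idx] here ... */
		if (++(*rd_idx) == hdr->entry_nb) {
			*rd_idx = 0;
			(*rd_wrap)++;
		}
	}
}

int main(void)
{
	struct nf_log_header hdr = {
		.first_free_entry_idx = 3, .wraparound_count = 0,
		.type = 2 /* SIGNALS */, .entry_nb = 8,
	};
	uint32_t idx = 0, wrap = 0;

	drain(&hdr, &idx, &wrap);
	printf("reader at entry %u, wrap %u\n", idx, wrap); /* 3, 0 */
	return 0;
}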
+
/*
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
+ /** Unsupported command */
VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
- /* IPC Host -> Device, Async commands */
+
+ /** IPC Host -> Device, base id for async commands */
VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ /**
+ * Reset engine. The NPU cancels all the jobs currently executing on the target
+	 * engine, making the engine idle, and then does a HW reset before returning
+ * to the host.
+ * @see struct vpu_ipc_msg_payload_engine_reset
+ */
VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+ /**
+ * Preempt engine. The NPU stops (preempts) all the jobs currently
+	 * executing on the target engine, making the engine idle and ready to
+ * execute new jobs.
+ * NOTE: The NPU does not remove unstarted jobs (if any) from job queues of
+ * the target engine, but it stops processing them (until the queue doorbell
+	 * is rung again); the host is responsible for resetting the job queue, either
+ * after preemption or when resubmitting jobs to the queue.
+ * @see vpu_ipc_msg_payload_engine_preempt
+ */
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ /**
+ * OS scheduling doorbell register command
+ * @see vpu_ipc_msg_payload_register_db
+ */
VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ /**
+ * OS scheduling doorbell unregister command
+ * @see vpu_ipc_msg_payload_unregister_db
+ */
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ /**
+ * Query engine heartbeat. Heartbeat is expected to increase monotonically
+	 * and to increase while work is being progressed by the NPU.
+ * @see vpu_ipc_msg_payload_query_engine_hb
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
@@ -266,6 +520,7 @@ enum vpu_ipc_msg_type {
* aborted and removed from internal scheduling queues. All doorbells assigned
* to the host_ssid are unregistered and any internal FW resources belonging to
* the host_ssid are released.
+ * @see vpu_ipc_msg_payload_ssid_release
*/
VPU_JSM_MSG_SSID_RELEASE = 0x110e,
/**
@@ -293,56 +548,114 @@ enum vpu_ipc_msg_type {
* @see vpu_jsm_metric_streamer_start
*/
VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
- /** Control command: Priority band setup */
+ /**
+ * Control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
- /** Control command: Create command queue */
+ /**
+ * Control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
- /** Control command: Destroy command queue */
+ /**
+ * Control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
- /** Control command: Set context scheduling properties */
+ /**
+ * Control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
- /*
+ /**
* Register a doorbell to notify VPU of new work. The doorbell may later be
* deallocated or reassigned to another context.
+ * @see vpu_jsm_hws_register_db
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
- /** Control command: Log buffer setting */
+ /**
+ * Control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
- /* Control command: Suspend command queue. */
+ /**
+ * Control command: Suspend command queue.
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
- /* Control command: Resume command queue */
+ /**
+ * Control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
- /* Control command: Resume engine after reset */
+ /**
+ * Control command: Resume engine after reset
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
- /* Control command: Enable survivability/DCT mode */
+ /**
+ * Control command: Enable survivability/DCT mode
+ * @see vpu_ipc_msg_payload_pwr_dct_control
+ */
VPU_JSM_MSG_DCT_ENABLE = 0x111c,
- /* Control command: Disable survivability/DCT mode */
+ /**
+ * Control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE = 0x111d,
/**
* Dump VPU state. To be used for debug purposes only.
- * NOTE: Please introduce new ASYNC commands before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC commands before this one.
*/
VPU_JSM_MSG_STATE_DUMP = 0x11FF,
- /* IPC Host -> Device, General commands */
+
+ /** IPC Host -> Device, base id for general commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
- VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
+ /** Unsupported command */
+ VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
- * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ * Linux command:
+ * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim
+ * @see vpu_ipc_msg_payload_dyndbg_control
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
/**
* Perform the save procedure for the D0i3 entry
*/
VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
- /* IPC Device -> Host, Job completion */
+
+ /**
+ * IPC Device -> Host, Job completion
+ * @see struct vpu_ipc_msg_payload_job_done
+ */
VPU_JSM_MSG_JOB_DONE = 0x2100,
+ /**
+ * IPC Device -> Host, Fence signalled
+ * @see vpu_ipc_msg_payload_native_fence_signalled
+ */
+ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
+
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ /**
+ * IPC Device -> Host, engine reset complete
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+ /**
+ * Preempt complete message
+ * @see vpu_ipc_msg_payload_engine_preempt_done
+ */
VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ /**
+ * Response to query engine heartbeat.
+ * @see vpu_ipc_msg_payload_query_engine_hb_done
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
@@ -359,7 +672,10 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
/** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
- /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ /**
+ * Response to VPU_JSM_MSG_SSID_RELEASE.
+ * @see vpu_ipc_msg_payload_ssid_release
+ */
VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
/**
* Response to VPU_JSM_MSG_METRIC_STREAMER_START.
@@ -389,39 +705,74 @@ enum vpu_ipc_msg_type {
/**
* Asynchronous event sent from the VPU to the host either when the current
* metric buffer is full or when the VPU has collected a multiple of
- * @notify_sample_count samples as indicated through the start command
- * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
- * metric data.
+ * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated
+ * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns
+ * information about collected metric data.
* @see vpu_jsm_metric_streamer_done
*/
VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
- /** Response to control command: Priority band setup */
+ /**
+ * Response to control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
- /** Response to control command: Create command queue */
+ /**
+ * Response to control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
- /** Response to control command: Destroy command queue */
+ /**
+ * Response to control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
- /** Response to control command: Set context scheduling properties */
+ /**
+ * Response to control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
- /** Response to control command: Log buffer setting */
+ /**
+ * Response to control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
- /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ /**
+ * IPC Device -> Host, HWS notify index entry of log buffer written
+ * @see vpu_ipc_msg_payload_hws_scheduling_log_notification
+ */
VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
- /* IPC Device -> Host, HWS completion of a context suspend request */
+ /**
+ * IPC Device -> Host, HWS completion of a context suspend request
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
- /* Response to control command: Resume command queue */
+ /**
+ * Response to control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
- /* Response to control command: Resume engine command response */
+ /**
+ * Response to control command: Resume engine command response
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
- /* Response to control command: Enable survivability/DCT mode */
+ /**
+ * Response to control command: Enable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
- /* Response to control command: Disable survivability/DCT mode */
+ /**
+ * Response to control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e,
/**
* Response to state dump control command.
- * NOTE: Please introduce new ASYNC responses before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC responses before this one.
*/
VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
+
/* IPC Device -> Host, General command completion */
VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
@@ -436,57 +787,66 @@ enum vpu_ipc_msg_type {
enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
-/*
- * Host <-> LRT IPC message payload definitions
+/**
+ * Engine reset request payload
+ * @see VPU_JSM_MSG_ENGINE_RESET
*/
struct vpu_ipc_msg_payload_engine_reset {
- /* Engine to be reset. */
+ /** Engine to be reset. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Engine preemption request struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT
+ */
struct vpu_ipc_msg_payload_engine_preempt {
- /* Engine to be preempted. */
+ /** Engine to be preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
 * This structure supports doorbell registration for OS scheduling only.
* @see VPU_JSM_MSG_REGISTER_DB
*/
struct vpu_ipc_msg_payload_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Virtual address in Global GTT pointing to the start of job queue. */
+ /** Virtual address in Global GTT pointing to the start of job queue. */
u64 jobq_base;
- /* Size of the job queue in bytes. */
+ /** Size of the job queue in bytes. */
u32 jobq_size;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
};
/**
- * @brief Unregister doorbell command structure.
+ * Unregister doorbell command structure.
* Request structure to unregister a doorbell for both HW and OS scheduling.
* @see VPU_JSM_MSG_UNREGISTER_DB
*/
struct vpu_ipc_msg_payload_unregister_db {
- /* Index of the doorbell to unregister. */
+ /** Index of the doorbell to unregister. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Heartbeat request structure
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB
+ */
struct vpu_ipc_msg_payload_query_engine_hb {
- /* Engine to return heartbeat value. */
+ /** Engine to return heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -506,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level {
u32 reserved_0;
};
+/**
+ * Structure for requesting ssid release
+ * @see VPU_JSM_MSG_SSID_RELEASE
+ */
struct vpu_ipc_msg_payload_ssid_release {
- /* Host sub-stream ID for the context to be released. */
+ /** Host sub-stream ID for the context to be released. */
u32 host_ssid;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -535,7 +899,7 @@ struct vpu_jsm_metric_streamer_start {
u64 sampling_rate;
/**
* If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
- * after every @notify_sample_count samples is collected or dropped by the VPU.
+	 * after every @ref notify_sample_count samples are collected or dropped by the VPU.
* If set to UINT_MAX the VPU will only generate a notification when the metric
* buffer is full. If set to 0 the VPU will never generate a notification.
*/
@@ -545,9 +909,9 @@ struct vpu_jsm_metric_streamer_start {
* Address and size of the buffer where the VPU will write metric data. The
* VPU writes all counters from enabled metric groups one after another. If
* there is no space left to write data at the next sample period the VPU
- * will switch to the next buffer (@see next_buffer_addr) and will optionally
- * send a notification to the host driver if @notify_sample_count is non-zero.
- * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ * will switch to the next buffer (@ref next_buffer_addr) and will optionally
+ * send a notification to the host driver if @ref notify_sample_count is non-zero.
+ * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -577,12 +941,22 @@ struct vpu_jsm_metric_streamer_update {
/** Metric group mask that identifies metric streamer instance. */
u64 metric_group_mask;
/**
- * Address and size of the buffer where the VPU will write metric data. If
- * the buffer address is 0 or same as the currently used buffer the VPU will
- * continue writing metric data to the current buffer. In this case the
- * buffer size is ignored and the size of the current buffer is unchanged.
- * If the address is non-zero and differs from the current buffer address the
- * VPU will immediately switch data collection to the new buffer.
+ * Address and size of the buffer where the VPU will write metric data.
+	 * These fields determine how the update operation behaves. Two cases exist:
+	 * 1. the client needs information about the number of collected samples and
+	 * the amount of data written to the current buffer
+	 * 2. the client wants to switch to a new buffer
+ *
+ * Case 1. is identified by the buffer address being 0 or the same as the
+ * currently used buffer address. In this case the buffer size is ignored and
+ * the size of the current buffer is unchanged. The VPU will return an update
+ * in the vpu_jsm_metric_streamer_done structure. The internal writing position
+ * into the buffer is not changed.
+ *
+	 * Case 2. is identified by the address being non-zero and different from the
+ * current buffer address. The VPU will immediately switch data collection to
+ * the new buffer. Then the VPU will return an update in the
+ * vpu_jsm_metric_streamer_done structure.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -600,53 +974,80 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
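
A small sketch distinguishing the two update cases described above: a progress query leaves buffer_addr at zero (or the current address), while a buffer switch passes a new non-zero address and size. The struct is a trimmed stand-in for vpu_jsm_metric_streamer_update and the addresses are illustrative.

#include <stdint.h>
#include <string.h>

struct ms_update {		/* trimmed stand-in */
	uint64_t metric_group_mask;
	uint64_t buffer_addr;
	uint64_t buffer_size;
};

/* Case 1: query progress only - keep writing to the current buffer. */
static void ms_query(struct ms_update *u, uint64_t mask)
{
	memset(u, 0, sizeof(*u));
	u->metric_group_mask = mask;
	u->buffer_addr = 0;	/* 0 (or current addr): size ignored, no switch */
}

/* Case 2: switch collection to a new buffer immediately. */
static void ms_switch(struct ms_update *u, uint64_t mask, uint64_t addr, uint64_t size)
{
	memset(u, 0, sizeof(*u));
	u->metric_group_mask = mask;
	u->buffer_addr = addr;	/* non-zero and != current buffer address */
	u->buffer_size = size;
}

int main(void)
{
	struct ms_update u;

	ms_query(&u, 0x1);				/* case 1 */
	ms_switch(&u, 0x1, 0x20000000ULL, 1 << 20);	/* case 2: 1 MiB buffer */
	return 0;
}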
-struct vpu_ipc_msg_payload_blob_deinit {
- /* 64-bit unique ID for the blob to be de-initialized. */
- u64 blob_id;
-};
-
+/**
+ * Device -> host job completion message.
+ * @see VPU_JSM_MSG_JOB_DONE
+ */
struct vpu_ipc_msg_payload_job_done {
- /* Engine to which the job was submitted. */
+ /** Engine to which the job was submitted. */
u32 engine_idx;
- /* Index of the doorbell to which the job was submitted */
+ /** Index of the doorbell to which the job was submitted */
u32 db_idx;
- /* ID of the completed job */
+ /** ID of the completed job */
u32 job_id;
- /* Status of the completed job */
+ /** Status of the completed job */
u32 job_status;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
+ u64 cmdq_id;
+};
+
+/**
+ * Notification message upon native fence signalling.
+ * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
+ */
+struct vpu_ipc_msg_payload_native_fence_signalled {
+ /** Engine ID. */
+ u32 engine_idx;
+ /** Host SSID. */
+ u32 host_ssid;
+ /** CMDQ ID */
u64 cmdq_id;
+ /** Fence object handle. */
+ u64 fence_handle;
};
+/**
+ * vpu_ipc_msg_payload_engine_reset_done contains an array of this structure,
+ * identifying which queues caused the reset if the FW was able to detect any error.
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
struct vpu_jsm_engine_reset_context {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /* See VPU_ENGINE_RESET_CONTEXT_* defines */
+ /** See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
+/**
+ * Engine reset response.
+ * @see VPU_JSM_MSG_ENGINE_RESET_DONE
+ */
struct vpu_ipc_msg_payload_engine_reset_done {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Number of impacted contexts */
+ /** Number of impacted contexts */
u32 num_impacted_contexts;
- /* Array of impacted command queue ids and their flags */
+ /** Array of impacted command queue ids and their flags */
struct vpu_jsm_engine_reset_context
impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
};
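/*
 * Sketch of consuming the reset-done payload above. The loop is bounded by
 * both the reported count and the array size; dbg_report_ctx() is a
 * hypothetical logging helper.
 */
static void handle_engine_reset_done(struct vpu_ipc_msg_payload_engine_reset_done *p)
{
	u32 i;

	for (i = 0; i < p->num_impacted_contexts &&
	     i < VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS; i++)
		dbg_report_ctx(p->impacted_contexts[i].cmdq_id,
			       p->impacted_contexts[i].flags);
}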
+/**
+ * Engine preemption response.
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE
+ */
struct vpu_ipc_msg_payload_engine_preempt_done {
- /* Engine preempted. */
+ /** Engine preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
@@ -675,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done {
u32 reserved_0;
};
+/**
+ * Structure for heartbeat response
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
+ */
struct vpu_ipc_msg_payload_query_engine_hb_done {
- /* Engine returning heartbeat value. */
+ /** Engine returning heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Heartbeat value. */
+ /** Heartbeat value. */
u64 heartbeat;
};
@@ -700,12 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-struct vpu_ipc_msg_payload_blob_deinit_done {
- /* 64-bit unique ID for the blob de-initialized. */
- u64 blob_id;
-};
-
-/* HWS priority band setup request / response */
+/**
+ * HWS priority band setup request / response
+ * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP
+ */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Grace period in 100ns units when preempting another priority band for
@@ -715,6 +1118,7 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Default quantum in 100ns units for scheduling across processes
* within a priority band
+ * The minimum value supported by the NPU is 1ms (10000 in 100ns units).
*/
u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
/*
@@ -727,17 +1131,27 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* in situations when it's starved by the focus band.
*/
u32 normal_band_percentage;
- /* Reserved */
- u32 reserved_0;
+ /*
+ * TDR timeout value in milliseconds. A value of 0 means no timeout.
+ */
+ u32 tdr_timeout;
+ /*
+ * Non-interactive queue timeout, in milliseconds, for no progress of the
+ * heartbeat. A value of 0 means no timeout.
+ */
+ u32 non_interactive_no_progress_timeout;
+ /*
+ * Non-interactive queue upper-limit timeout value in milliseconds.
+ * A value of 0 means no timeout.
+ */
+ u32 non_interactive_timeout;
};
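/*
 * Sketch of populating the mixed units above: quanta are expressed in
 * 100ns units (minimum 1ms == 10000 units) while the timeouts are plain
 * milliseconds. The values chosen here are arbitrary examples.
 */
#define MS_TO_100NS(ms)	((ms) * 10000u)

static void fill_priority_band_setup(struct vpu_ipc_msg_payload_hws_priority_band_setup *p)
{
	int band;

	for (band = 0; band < VPU_HWS_NUM_PRIORITY_BANDS; band++)
		p->process_quantum[band] = MS_TO_100NS(2);	/* >= 1ms minimum */
	p->tdr_timeout = 0;				/* 0 == no TDR timeout */
	p->non_interactive_no_progress_timeout = 1000;	/* 1 s */
	p->non_interactive_timeout = 0;			/* 0 == no timeout */
}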
-/*
+/**
* @brief HWS create command queue request.
* Host will create a command queue via this command.
* Note: Cmdq group is a handle of an object which
* may contain one or more command queues.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
@@ -758,60 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 reserved_0;
};
-/*
- * @brief HWS create command queue response.
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+/**
+ * HWS create command queue response.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
- /* Process id */
+ /** Process id */
u64 process_id;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Engine for which queue is being created */
+ /** Engine for which queue is being created */
u32 engine_idx;
- /* Command queue group */
+ /** Command queue group */
u64 cmdq_group;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS destroy command queue request / response */
+/**
+ * HWS destroy command queue request / response
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_destroy_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS set context scheduling properties request / response */
+/**
+ * HWS set context scheduling properties request / response
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP
+ */
struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /* Priority band to assign to work of this context */
+ /**
+ * Priority band to assign to work of this context.
+ * Available priority bands: @see enum vpu_job_scheduling_priority_band
+ */
u32 priority_band;
- /* Inside realtime band assigns a further priority */
+ /** Inside realtime band assigns a further priority */
u32 realtime_priority_level;
- /* Priority relative to other contexts in the same process */
+ /** Priority relative to other contexts in the same process */
s32 in_process_priority;
- /* Zero padding / Reserved */
+ /** Zero padding / Reserved */
u32 reserved_1;
- /* Context quantum relative to other contexts of same priority in the same process */
+ /**
+ * Context quantum relative to other contexts of the same priority in the
+ * same process. The minimum value supported by the NPU is 1ms (10000 in
+ * 100ns units).
+ */
u64 context_quantum;
- /* Grace period when preempting context of the same priority within the same process */
+ /** Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
- /* Grace period when preempting context of a lower priority within the same process */
+ /** Grace period when preempting context of a lower priority within the same process */
u64 grace_period_lower_priority;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for both HW and OS scheduling.
* Note: Queue base and size are added here so that the same structure can be used for
* OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
@@ -820,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
* @see VPU_JSM_MSG_HWS_REGISTER_DB
*/
struct vpu_jsm_hws_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_id;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
- /* ID of the command queue associated with the doorbell. */
+ /** ID of the command queue associated with the doorbell. */
u64 cmdq_id;
- /* Virtual address pointing to the start of command queue. */
+ /** Virtual address pointing to the start of command queue. */
u64 cmdq_base;
- /* Size of the command queue in bytes. */
+ /** Size of the command queue in bytes. */
u64 cmdq_size;
};
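/*
 * Sketch of filling the doorbell registration payload above. Per the note,
 * cmdq_id is ignored for OS scheduling; the queue base and size come from
 * the driver's own command queue allocation.
 */
static void fill_hws_register_db(struct vpu_jsm_hws_register_db *p,
				 u32 db_id, u32 ssid, u64 cmdq_id,
				 u64 base, u64 size)
{
	p->db_id = db_id;
	p->host_ssid = ssid;
	p->cmdq_id = cmdq_id;	/* ignored for OS scheduling */
	p->cmdq_base = base;
	p->cmdq_size = size;
}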
-/*
- * @brief Structure to set another buffer to be used for scheduling-related logging.
+/**
+ * Structure to set another buffer to be used for scheduling-related logging.
* The size of the logging buffer and the number of entries is defined as part of the
* buffer itself as described next.
 * The log buffer received from the host is made up of:
- * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header.
* The header contains the number of log entries in the buffer.
* - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
- * 'struct vpu_hws_log_buffer_entry'.
+ * @ref vpu_hws_log_buffer_entry.
* The entry contains the VPU timestamp, operation type and data.
* The host should provide the notify index value of log buffer to VPU. This is a
* value defined within the log buffer and when written to will generate the
@@ -854,24 +1281,30 @@ struct vpu_jsm_hws_register_db {
* @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
*/
struct vpu_ipc_msg_payload_hws_set_scheduling_log {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /*
+ /**
* VPU log buffer virtual address.
* Set to 0 to disable logging for this engine.
*/
u64 vpu_log_buffer_va;
- /*
+ /**
* Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
* is generated when an event log is written to this index.
*/
u64 notify_index;
+ /**
+ * This field is deprecated and will be removed once the KMD is updated to
+ * support its removal.
+ */
+ u32 enable_extra_events;
+ /** Zero Padding */
+ u32 reserved_0;
};
-/*
- * @brief The scheduling log notification is generated by VPU when it writes
+/**
+ * The scheduling log notification is generated by VPU when it writes
* an event into the log buffer at the notify_index. VPU notifies host with
* VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
* message from VPU to host.
@@ -879,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
* @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
*/
struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief HWS suspend command queue request and done structure.
+/**
+ * HWS suspend command queue request and done structure.
 * Host will request the suspend of contexts and VPU will:
* - Suspend all work on this context
* - Preempt any running work
@@ -905,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
* @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
*/
struct vpu_ipc_msg_payload_hws_suspend_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Suspend fence value - reported by the VPU suspend context
* completed once suspend is complete.
*/
u64 suspend_fence_value;
};
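/*
 * Sketch of the suspend handshake described above: the host picks a fence
 * value, sends the request, and treats the suspend as complete once the
 * VPU reports back a fence value at least that large.
 * vpu_read_suspend_fence() is a hypothetical accessor for the fence
 * location reported by the VPU.
 */
static bool cmdq_suspend_complete(struct vpu_ipc_msg_payload_hws_suspend_cmdq *req)
{
	return vpu_read_suspend_fence(req->cmdq_id) >= req->suspend_fence_value;
}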
-/*
- * @brief HWS Resume command queue request / response structure.
+/**
+ * HWS Resume command queue request / response structure.
 * Host will request the resume of a context:
* - VPU will resume all work on this context
* - Scheduler will allow this context to be scheduled
@@ -927,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
* @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
*/
struct vpu_ipc_msg_payload_hws_resume_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
- * @brief HWS Resume engine request / response structure.
- * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
+/**
+ * HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
* Host shall send this command to resume scheduling of any valid queue.
- * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
* @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
*/
struct vpu_ipc_msg_payload_hws_resume_engine {
- /* Engine to be resumed */
+ /** Engine to be resumed */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -1079,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done {
/**
* Metric group description placed in the metric buffer after successful completion
* of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
- * @vpu_jsm_metric_counter_descriptor records.
+ * @ref vpu_jsm_metric_counter_descriptor records.
* @see VPU_JSM_MSG_METRIC_STREAMER_INFO
*/
struct vpu_jsm_metric_group_descriptor {
@@ -1166,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor {
};
/**
- * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests.
*
- * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug
- * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
- * messages. This is equivalent to the Dynamic Debug functionality provided by
- * Linux
- * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html)
- * The host can control Dynamic Debug behavior by sending dyndbg commands, which
- * have the same syntax as Linux
- * dyndbg commands.
+ * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug
+ * feature, which allows developers to selectively enable/disable code to obtain
+ * additional FW information. This is equivalent to the dynamic debug functionality
+ * provided by Linux. The host can control dynamic debug behavior by sending dyndbg
+ * commands, using the same syntax as for Linux dynamic debug commands.
*
- * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
- * still has to set the logging level to MVLOG_DEBUG, using the
- * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html.
*
- * The host can see the current dynamic debug configuration by executing a
- * special 'show' command. The dyndbg configuration will be printed to the
- * configured logging destination using MVLOG_INFO logging level.
+ * NOTE:
+ * As the dynamic debug feature uses MVLOG messages to provide information, the host
+ * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG
+ * command.
*/
struct vpu_ipc_msg_payload_dyndbg_control {
/**
- * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
- * string.
+ * Dyndbg command to be executed.
*/
char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
};
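/*
 * Sketch of building a dyndbg payload. The command string uses the same
 * syntax as Linux dynamic debug and must fit, NUL-terminated, in the
 * fixed-size buffer; strscpy() reports truncation as -E2BIG.
 */
static int fill_dyndbg_cmd(struct vpu_ipc_msg_payload_dyndbg_control *p,
			   const char *cmd)
{
	if (strscpy(p->dyndbg_cmd, cmd, sizeof(p->dyndbg_cmd)) < 0)
		return -E2BIG;
	return 0;
}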
@@ -1209,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter {
};
/**
- * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message.
*
* Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
 * corresponding to an 85% duty cycle. This payload allows the host to tune these
@@ -1235,10 +1663,10 @@ union vpu_ipc_msg_payload {
struct vpu_jsm_metric_streamer_start metric_streamer_start;
struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
struct vpu_jsm_metric_streamer_update metric_streamer_update;
- struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
struct vpu_ipc_msg_payload_ssid_release ssid_release;
struct vpu_jsm_hws_register_db hws_register_db;
struct vpu_ipc_msg_payload_job_done job_done;
+ struct vpu_ipc_msg_payload_native_fence_signalled native_fence_signalled;
struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
struct vpu_ipc_msg_payload_register_db_done register_db_done;
@@ -1246,7 +1674,6 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
struct vpu_jsm_metric_streamer_done metric_streamer_done;
- struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
struct vpu_ipc_msg_payload_trace_config trace_config;
struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
@@ -1267,28 +1694,28 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
};
-/*
- * Host <-> LRT IPC message base structure.
+/**
+ * Host <-> NPU IPC message base structure.
*
* NOTE: All instances of this object must be aligned on a 64B boundary
* to allow proper handling of VPU cache operations.
*/
struct vpu_jsm_msg {
- /* Reserved */
+ /** Reserved */
u64 reserved_0;
- /* Message type, see vpu_ipc_msg_type enum. */
+ /** Message type, see @ref vpu_ipc_msg_type. */
u32 type;
- /* Buffer status, see vpu_ipc_msg_status enum. */
+ /** Buffer status, see @ref vpu_ipc_msg_status. */
u32 status;
- /*
+ /**
* Request ID, provided by the host in a request message and passed
* back by VPU in the response message.
*/
u32 request_id;
- /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+ /** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
u32 result;
u64 reserved_1;
- /* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+ /** Message payload depending on message type, see vpu_ipc_msg_payload union. */
union vpu_ipc_msg_payload payload;
};
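/*
 * Sketch of honouring the 64B alignment note above when a message is
 * built on the stack rather than in a dedicated DMA region. The request
 * type used here is assumed from the _DONE counterpart documented above.
 */
static void build_query_engine_hb(void)
{
	struct vpu_jsm_msg msg __aligned(64) = {
		.type = VPU_JSM_MSG_QUERY_ENGINE_HB,
		.request_id = 1,	/* echoed back in the response */
	};

	/* ... hand &msg to the IPC transmit path ... */
}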
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index a9f866230058..116e42d152ca 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -8,8 +8,8 @@ config DRM_ACCEL_QAIC
depends on DRM_ACCEL
depends on PCI && HAS_IOMEM
depends on MHI_BUS
- depends on MMU
select CRC32
+ select WANT_DEV_COREDUMP
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
designed to accelerate Deep Learning inference workloads.
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 35e883515629..71f727b74da3 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,6 +10,9 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
+ qaic_ras.o \
+ qaic_ssr.o \
+ qaic_sysfs.o \
qaic_timesync.o \
sahara.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index ada9b1eb0787..13a14c6c6168 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,6 +20,11 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
+static const char *fw_image_paths[FAMILY_MAX] = {
+ [FAMILY_AIC100] = "qcom/aic100/sbl.bin",
+ [FAMILY_AIC200] = "qcom/aic200/sbl.bin",
+};
+
static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
@@ -405,6 +410,329 @@ static const struct mhi_channel_config aic100_channels[] = {
.auto_queue = false,
.wake_capable = false,
},
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
+};
+
+static const struct mhi_channel_config aic200_channels[] = {
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 0,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 1,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 2,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 3,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 6,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 7,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 10,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 11,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 12,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 13,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 14,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 15,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 16,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 17,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
};
static struct mhi_event_config aic100_events[] = {
@@ -422,16 +750,44 @@ static struct mhi_event_config aic100_events[] = {
},
};
-static struct mhi_controller_config aic100_config = {
- .max_channels = 128,
- .timeout_ms = 0, /* controlled by mhi_timeout */
- .buf_len = 0,
- .num_channels = ARRAY_SIZE(aic100_channels),
- .ch_cfg = aic100_channels,
- .num_events = ARRAY_SIZE(aic100_events),
- .event_cfg = aic100_events,
- .use_bounce_buf = false,
- .m2_no_db = false,
+static struct mhi_event_config aic200_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 0,
+ .channel = U32_MAX,
+ .priority = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config mhi_cntrl_configs[] = {
+ [FAMILY_AIC100] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic100_channels),
+ .ch_cfg = aic100_channels,
+ .num_events = ARRAY_SIZE(aic100_events),
+ .event_cfg = aic100_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
+ [FAMILY_AIC200] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic200_channels),
+ .ch_cfg = aic200_channels,
+ .num_events = ARRAY_SIZE(aic200_events),
+ .event_cfg = aic200_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
};
static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
@@ -513,8 +869,9 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi)
+ int mhi_irq, bool shared_msi, int family)
{
+ struct mhi_controller_config mhi_config = mhi_cntrl_configs[family];
struct mhi_controller *mhi_cntrl;
int ret;
@@ -549,11 +906,18 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
mhi_cntrl->irq_flags = IRQF_SHARED;
- mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
+ mhi_cntrl->fw_image = fw_image_paths[family];
+
+ if (family == FAMILY_AIC200) {
+ mhi_cntrl->name = "AIC200";
+ mhi_cntrl->seg_len = SZ_512K;
+ } else {
+ mhi_cntrl->name = "AIC100";
+ }
/* use latest configured timeout */
- aic100_config.timeout_ms = mhi_timeout_ms;
- ret = mhi_register_controller(mhi_cntrl, &aic100_config);
+ mhi_config.timeout_ms = mhi_timeout_ms;
+ ret = mhi_register_controller(mhi_cntrl, &mhi_config);
if (ret) {
pci_err(pci_dev, "mhi_register_controller failed %d\n", ret);
return ERR_PTR(ret);
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 500e7f4af2af..8939f6ae185e 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi);
+ int mhi_irq, bool shared_msi, int family);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 02561b6cecc6..fa7a8155658c 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -21,6 +21,7 @@
#define QAIC_DBC_BASE SZ_128K
#define QAIC_DBC_SIZE SZ_4K
+#define QAIC_SSR_DBC_SENTINEL U32_MAX /* Sentinel: no SSR in progress */
#define QAIC_NO_PARTITION -1
@@ -32,6 +33,12 @@
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
+enum aic_families {
+ FAMILY_AIC100,
+ FAMILY_AIC200,
+ FAMILY_MAX,
+};
+
enum __packed dev_states {
/* Device is offline or will be very soon */
QAIC_OFFLINE,
@@ -41,6 +48,22 @@ enum __packed dev_states {
QAIC_ONLINE,
};
+enum dbc_states {
+ /* DBC is free and can be activated */
+ DBC_STATE_IDLE,
+ /* DBC is activated and a workload is running on device */
+ DBC_STATE_ASSIGNED,
+ /* Sub-system associated with this workload has crashed and will shut down soon */
+ DBC_STATE_BEFORE_SHUTDOWN,
+ /* Sub-system associated with this workload has crashed and has shut down */
+ DBC_STATE_AFTER_SHUTDOWN,
+ /* Sub-system associated with this workload is shut down and will be powered up soon */
+ DBC_STATE_BEFORE_POWER_UP,
+ /* Sub-system associated with this workload is now powered up */
+ DBC_STATE_AFTER_POWER_UP,
+ DBC_STATE_MAX,
+};
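/*
 * Sketch of one plausible ordering of the states above across a single
 * SSR cycle, using the set_dbc_state() helper added by this series. In
 * practice the transitions are driven by SSR messages from the device,
 * not by a single function like this.
 */
static void dbc_ssr_state_walk(struct qaic_device *qdev, u32 dbc_id)
{
	set_dbc_state(qdev, dbc_id, DBC_STATE_BEFORE_SHUTDOWN);
	set_dbc_state(qdev, dbc_id, DBC_STATE_AFTER_SHUTDOWN);
	set_dbc_state(qdev, dbc_id, DBC_STATE_BEFORE_POWER_UP);
	set_dbc_state(qdev, dbc_id, DBC_STATE_AFTER_POWER_UP);
	set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE);	/* free for activation */
}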
+
extern bool datapath_polling;
struct qaic_user {
@@ -91,6 +114,8 @@ struct dma_bridge_chan {
* response queue's head and tail pointer of this DBC.
*/
void __iomem *dbc_base;
+ /* Synchronizes access to the request queue's head and tail pointers */
+ struct mutex req_lock;
/* Head of list where each node is a memory handle queued in request queue */
struct list_head xfer_list;
/* Synchronizes DBC readers during cleanup */
@@ -106,6 +131,8 @@ struct dma_bridge_chan {
unsigned int irq;
/* Polling work item to simulate interrupts */
struct work_struct poll_work;
+ /* Represents various states of this DBC from enum dbc_states */
+ unsigned int state;
};
struct qaic_device {
@@ -113,10 +140,10 @@ struct qaic_device {
struct pci_dev *pdev;
/* Req. ID of request that will be queued next in MHI control device */
u32 next_seq_num;
- /* Base address of bar 0 */
- void __iomem *bar_0;
- /* Base address of bar 2 */
- void __iomem *bar_2;
+ /* Base address of the MHI bar */
+ void __iomem *bar_mhi;
+ /* Base address of the DBCs bar */
+ void __iomem *bar_dbc;
/* Controller structure for MHI devices */
struct mhi_controller *mhi_cntrl;
/* MHI control channel device */
@@ -153,6 +180,8 @@ struct qaic_device {
struct mhi_device *qts_ch;
/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
struct workqueue_struct *qts_wq;
+ /* MHI "QAIC_TIMESYNC_PERIODIC" channel device */
+ struct mhi_device *mqts_ch;
/* Head of list of page allocated by MHI bootlog device */
struct list_head bootlog;
/* MHI bootlog channel device */
@@ -161,6 +190,22 @@ struct qaic_device {
struct workqueue_struct *bootlog_wq;
/* Synchronizes access of pages in MHI bootlog device */
struct mutex bootlog_mutex;
+ /* MHI RAS channel device */
+ struct mhi_device *ras_ch;
+ /* Correctable error count */
+ unsigned int ce_count;
+ /* Un-correctable error count */
+ unsigned int ue_count;
+ /* Un-correctable non-fatal error count */
+ unsigned int ue_nf_count;
+ /* MHI SSR channel device */
+ struct mhi_device *ssr_ch;
+ /* Work queue for tasks related to MHI SSR device */
+ struct workqueue_struct *ssr_wq;
+ /* Buffer to collect SSR crashdump via SSR MHI channel */
+ void *ssr_mhi_buf;
+ /* DBC which is under SSR. The sentinel U32_MAX means no SSR is in progress */
+ u32 ssr_dbc;
};
struct qaic_drm_device {
@@ -179,6 +224,8 @@ struct qaic_drm_device {
struct list_head users;
/* Synchronizes access to users list */
struct mutex users_mutex;
+ /* Pointer to array of DBC sysfs attributes */
+ void *sysfs_attrs;
};
struct qaic_bo {
@@ -207,8 +254,6 @@ struct qaic_bo {
bool sliced;
/* Request ID of this BO if it is queued for execution */
u16 req_id;
- /* Handle assigned to this BO */
- u32 handle;
/* Wait on this for completion of DMA transfer of this BO */
struct completion xfer_done;
/*
@@ -303,6 +348,13 @@ int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void irq_polling_work(struct work_struct *work);
+void qaic_irq_polling_work(struct work_struct *work);
+void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id);
+void qaic_dbc_exit_ssr(struct qaic_device *qdev);
+
+/* qaic_sysfs.c */
+int qaic_sysfs_init(struct qaic_drm_device *qddev);
+void qaic_sysfs_remove(struct qaic_drm_device *qddev);
+void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state);
#endif /* _QAIC_H_ */
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 9e8a8cbadf6b..428d8f65bff3 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -17,6 +17,7 @@
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
@@ -30,7 +31,7 @@
#define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP SZ_256
#define QAIC_DBC_Q_BUF_ALIGN SZ_4K
-#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */
+#define QAIC_MANAGE_WIRE_MSG_LENGTH SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS 100
#define QAIC_MHI_RETRY_MAX 20
@@ -309,6 +310,7 @@ static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resou
enable_dbc(qdev, dbc_id, usr);
qdev->dbc[dbc_id].in_use = true;
resources->buf = NULL;
+ set_dbc_state(qdev, dbc_id, DBC_STATE_ASSIGNED);
}
}
@@ -367,7 +369,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap
if (in_trans->hdr.len % 8 != 0)
return -EINVAL;
- if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
+ if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers,
@@ -407,7 +409,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
- return 0;
+ return -EINVAL;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;
@@ -495,8 +497,8 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
nents = sgt->nents;
nents_dma = nents;
- *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
- for_each_sgtable_sg(sgt, sg, i) {
+ *size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
+ for_each_sgtable_dma_sg(sgt, sg, i) {
*size -= sizeof(*asp);
/* Save 1K for possible follow-up transactions. */
if (*size < SZ_1K) {
@@ -576,7 +578,7 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
/* There should be enough space to hold at least one ASP entry. */
if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
- QAIC_MANAGE_EXT_MSG_LENGTH)
+ QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOMEM;
xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
@@ -645,7 +647,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
- if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
if (!in_trans->queue_size)
@@ -655,8 +657,9 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
return -EINVAL;
nelem = in_trans->queue_size;
- size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
- if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
+ if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()),
+ nelem,
+ &size))
return -EINVAL;
if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
@@ -729,7 +732,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
- if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
@@ -810,7 +813,7 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
}
if (ret)
- break;
+ goto out;
}
if (user_len != user_msg->len)
@@ -921,6 +924,7 @@ static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len
}
release_dbc(qdev, dbc_id);
+ set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE);
*msg_len += sizeof(*in_trans);
return 0;
@@ -1052,7 +1056,7 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
init_completion(&elem.xfer_done);
if (likely(!qdev->cntl_lost_buf)) {
/*
- * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
+ * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH.
* The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
*/
out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
@@ -1079,7 +1083,6 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
list_for_each_entry(w, &wrappers->list, list) {
kref_get(&w->ref_count);
- retry_count = 0;
ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
if (ret) {
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index e86e71c1cdd8..60cb4d65d48e 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -18,6 +18,7 @@
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
@@ -165,16 +166,17 @@ static void free_slice(struct kref *kref)
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
kfree(slice->sgt);
- kfree(slice->reqs);
+ kvfree(slice->reqs);
kfree(slice);
}
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,9 +184,11 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
- for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+ for_each_sgtable_dma_sg(sgt_in, sg, j) {
len = sg_dma_len(sg);
if (!len)
@@ -221,7 +225,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
/* copy relevant sg node and fix page and length */
sgn = sgf;
- for_each_sgtable_sg(sgt, sg, j) {
+ for_each_sgtable_dma_sg(sgt, sg, j) {
memcpy(sg, sgn, sizeof(*sg));
if (sgn == sgf) {
sg_dma_address(sg) += offf;
@@ -301,7 +305,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
* fence.
*/
dev_addr = req->dev_addr;
- for_each_sgtable_sg(slice->sgt, sg, i) {
+ for_each_sgtable_dma_sg(slice->sgt, sg, i) {
slice->reqs[i].cmd = cmd;
slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
sg_dma_address(sg) : dev_addr);
@@ -401,7 +405,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
goto free_sgt;
}
- slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
+ slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
if (!slice->reqs) {
ret = -ENOMEM;
goto free_slice;
@@ -427,7 +431,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
return 0;
free_req:
- kfree(slice->reqs);
+ kvfree(slice->reqs);
free_slice:
kfree(slice);
free_sgt:
@@ -554,6 +558,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +568,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
@@ -604,7 +610,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
struct scatterlist *sg;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return -EINVAL;
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
@@ -625,7 +631,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* DMABUF/PRIME Path */
drm_prime_gem_destroy(obj, NULL);
} else {
@@ -638,8 +644,36 @@ static void qaic_free_object(struct drm_gem_object *obj)
kfree(bo);
}
+static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+ struct scatterlist *sg, *sg_in;
+ struct sg_table *sgt, *sgt_in;
+ int i;
+
+ sgt_in = bo->sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) {
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = sgt->sgl;
+ for_each_sgtable_sg(sgt_in, sg_in, i) {
+ memcpy(sg, sg_in, sizeof(*sg));
+ sg = sg_next(sg);
+ }
+
+ return sgt;
+}
+
static const struct drm_gem_object_funcs qaic_gem_funcs = {
.free = qaic_free_object,
+ .get_sg_table = qaic_get_sg_table,
.print_info = qaic_gem_print_info,
.mmap = qaic_gem_object_mmap,
.vm_ops = &drm_vm_ops,
@@ -726,7 +760,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->handle = args->handle;
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -865,7 +898,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
@@ -889,7 +922,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
qaic_unprepare_import_bo(bo);
else
qaic_unprepare_export_bo(qdev, bo);
@@ -949,8 +982,9 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (args->hdr.count == 0)
return -EINVAL;
- arg_size = args->hdr.count * sizeof(*slice_ent);
- if (arg_size / args->hdr.count != sizeof(*slice_ent))
+ if (check_mul_overflow((unsigned long)args->hdr.count,
+ (unsigned long)sizeof(*slice_ent),
+ &arg_size))
return -EINVAL;
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
@@ -980,18 +1014,12 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
user_data = u64_to_user_ptr(args->data);
- slice_ent = kzalloc(arg_size, GFP_KERNEL);
- if (!slice_ent) {
- ret = -EINVAL;
+ slice_ent = memdup_user(user_data, arg_size);
+ if (IS_ERR(slice_ent)) {
+ ret = PTR_ERR(slice_ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(slice_ent, user_data, arg_size);
- if (ret) {
- ret = -EFAULT;
- goto free_slice_ent;
- }
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
@@ -1019,6 +1047,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
goto unlock_ch_srcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto unlock_ch_srcu;
+ }
+
ret = qaic_prepare_bo(qdev, bo, &args->hdr);
if (ret)
goto unlock_ch_srcu;
@@ -1296,8 +1329,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
int usr_rcu_id, qdev_rcu_id;
struct qaic_device *qdev;
struct qaic_user *usr;
- u8 __user *user_data;
- unsigned long n;
u64 received_ts;
u32 queue_level;
u64 submit_ts;
@@ -1310,20 +1341,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
received_ts = ktime_get_ns();
size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
- n = (unsigned long)size * args->hdr.count;
- if (args->hdr.count == 0 || n / args->hdr.count != size)
+ if (args->hdr.count == 0)
return -EINVAL;
- user_data = u64_to_user_ptr(args->data);
-
- exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
- if (!exec)
- return -ENOMEM;
-
- if (copy_from_user(exec, user_data, n)) {
- ret = -EFAULT;
- goto free_exec;
- }
+ exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size);
+ if (IS_ERR(exec))
+ return PTR_ERR(exec);
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1352,13 +1375,22 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
goto release_ch_rcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto release_ch_rcu;
+ }
+
+ ret = mutex_lock_interruptible(&dbc->req_lock);
+ if (ret)
+ goto release_ch_rcu;
+
head = readl(dbc->dbc_base + REQHP_OFF);
tail = readl(dbc->dbc_base + REQTP_OFF);
if (head == U32_MAX || tail == U32_MAX) {
/* PCI link error */
ret = -ENODEV;
- goto release_ch_rcu;
+ goto unlock_req_lock;
}
queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
@@ -1366,11 +1398,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
head, &tail);
if (ret)
- goto release_ch_rcu;
+ goto unlock_req_lock;
/* Finalize commit to hardware */
submit_ts = ktime_get_ns();
writel(tail, dbc->dbc_base + REQTP_OFF);
+ mutex_unlock(&dbc->req_lock);
update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
submit_ts, queue_level);
@@ -1378,13 +1411,15 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
if (datapath_polling)
schedule_work(&dbc->poll_work);
+unlock_req_lock:
+ if (ret)
+ mutex_unlock(&dbc->req_lock);
release_ch_rcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
-free_exec:
kfree(exec);
return ret;
}
@@ -1479,7 +1514,7 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-void irq_polling_work(struct work_struct *work)
+void qaic_irq_polling_work(struct work_struct *work)
{
struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
unsigned long flags;
@@ -1697,6 +1732,11 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_ch_srcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto unlock_ch_srcu;
+ }
+
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
ret = -ENOENT;
@@ -1717,6 +1757,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
if (!dbc->usr)
ret = -EPERM;
+ if (dbc->id == qdev->ssr_dbc)
+ ret = -EPIPE;
+
put_obj:
drm_gem_object_put(obj);
unlock_ch_srcu:
@@ -1737,7 +1780,8 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
struct qaic_device *qdev;
struct qaic_user *usr;
struct qaic_bo *bo;
- int ret, i;
+ int ret = 0;
+ int i;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1758,18 +1802,12 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto unlock_dev_srcu;
}
- ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
- if (!ent) {
- ret = -EINVAL;
+ ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent));
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
- if (ret) {
- ret = -EFAULT;
- goto free_ent;
- }
-
for (i = 0; i < args->hdr.count; i++) {
obj = drm_gem_object_lookup(file_priv, ent[i].handle);
if (!obj) {
@@ -1777,6 +1815,16 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto free_ent;
}
bo = to_qaic_bo(obj);
+ if (!bo->sliced) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+ if (bo->dbc->id != args->hdr.dbc_id) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
/*
* perf stats ioctl is called before wait ioctl is complete then
* the latency information is invalid.
@@ -1915,6 +1963,17 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}
+static void sync_empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
+{
+ empty_xfer_list(qdev, dbc);
+ synchronize_srcu(&dbc->ch_lock);
+ /*
+ * Threads holding channel lock, may add more elements in the xfer_list.
+ * Flush out these elements from xfer_list.
+ */
+ empty_xfer_list(qdev, dbc);
+}
+
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
@@ -1929,7 +1988,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
* enable_dbc - Enable the DBC. DBCs are disabled by removing the context of
* user. Add user context back to DBC to enable it. This function trusts the
* DBC ID passed and expects the DBC to be disabled.
- * @qdev: Qranium device handle
+ * @qdev: qaic device handle
* @dbc_id: ID of the DBC
* @usr: User context
*/
@@ -1943,13 +2002,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
dbc->usr = NULL;
- empty_xfer_list(qdev, dbc);
- synchronize_srcu(&dbc->ch_lock);
- /*
- * Threads holding channel lock, may add more elements in the xfer_list.
- * Flush out these elements from xfer_list.
- */
- empty_xfer_list(qdev, dbc);
+ sync_empty_xfer_list(qdev, dbc);
}
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
@@ -1990,3 +2043,30 @@ void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
*head = readl(dbc->dbc_base + REQHP_OFF);
*tail = readl(dbc->dbc_base + REQTP_OFF);
}
+
+/*
+ * qaic_dbc_enter_ssr - Prepare to enter sub-system reset (SSR) for the given DBC ID.
+ * @qdev: qaic device handle
+ * @dbc_id: ID of the DBC which will enter SSR
+ *
+ * The device will automatically deactivate the workload as not
+ * all errors can be silently recovered. The user will be
+ * notified and must decide what recovery action to take.
+ */
+void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id)
+{
+ qdev->ssr_dbc = dbc_id;
+ release_dbc(qdev, dbc_id);
+}
+
+/*
+ * qaic_dbc_exit_ssr - Exit sub-system reset (SSR).
+ * @qdev: qaic device handle
+ *
+ * The DBC returns to an operational state and begins accepting work after exiting SSR.
+ */
+void qaic_dbc_exit_ssr(struct qaic_device *qdev)
+{
+ qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
+}
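/*
 * Sketch of how an SSR handler might bracket crashdump collection with the
 * two helpers above; ssr_collect_crashdump() is a hypothetical stand-in
 * for the collection path over the SSR MHI channel.
 */
static void ssr_cycle_example(struct qaic_device *qdev, u32 dbc_id)
{
	qaic_dbc_enter_ssr(qdev, dbc_id);	/* workload deactivated */
	ssr_collect_crashdump(qdev);
	qaic_dbc_exit_ssr(qdev);		/* DBC usable again */
}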
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index 20b653d99e52..8dc4fe5bb560 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -64,20 +64,9 @@ static int bootlog_show(struct seq_file *s, void *unused)
return 0;
}
-static int bootlog_fops_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bootlog_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(bootlog);
-static const struct file_operations bootlog_fops = {
- .owner = THIS_MODULE,
- .open = bootlog_fops_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int read_dbc_fifo_size(struct seq_file *s, void *unused)
+static int fifo_size_show(struct seq_file *s, void *unused)
{
struct dma_bridge_chan *dbc = s->private;
@@ -85,20 +74,9 @@ static int read_dbc_fifo_size(struct seq_file *s, void *unused)
return 0;
}
-static int fifo_size_open(struct inode *inode, struct file *file)
-{
- return single_open(file, read_dbc_fifo_size, inode->i_private);
-}
-
-static const struct file_operations fifo_size_fops = {
- .owner = THIS_MODULE,
- .open = fifo_size_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fifo_size);
-static int read_dbc_queued(struct seq_file *s, void *unused)
+static int queued_show(struct seq_file *s, void *unused)
{
struct dma_bridge_chan *dbc = s->private;
u32 tail = 0, head = 0;
@@ -115,18 +93,7 @@ static int read_dbc_queued(struct seq_file *s, void *unused)
return 0;
}
-static int queued_open(struct inode *inode, struct file *file)
-{
- return single_open(file, read_dbc_queued, inode->i_private);
-}
-
-static const struct file_operations queued_fops = {
- .owner = THIS_MODULE,
- .open = queued_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(queued);
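/*
 * DEFINE_SHOW_ATTRIBUTE(queued) generates a queued_fops wired to
 * queued_show() via single_open(), so registration collapses to a
 * one-liner; the directory and mode below are illustrative.
 *
 *	debugfs_create_file("queued", 0400, debugfs_dir, dbc, &queued_fops);
 */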
void qaic_debugfs_init(struct qaic_drm_device *qddev)
{
@@ -251,6 +218,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
if (ret)
goto destroy_workqueue;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+
for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
if (!msg) {
@@ -266,14 +236,11 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
goto mhi_unprepare;
}
- dev_set_drvdata(&mhi_dev->dev, qdev);
- qdev->bootlog_ch = mhi_dev;
return 0;
mhi_unprepare:
mhi_unprepare_from_transfer(mhi_dev);
destroy_workqueue:
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
out:
return ret;
@@ -286,7 +253,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
qdev = dev_get_drvdata(&mhi_dev->dev);
mhi_unprepare_from_transfer(qdev->bootlog_ch);
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
qdev->bootlog_ch = NULL;
}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 580b29ed1902..4c70bd949d53 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -29,17 +29,53 @@
#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
+#include "qaic_ras.h"
+#include "qaic_ssr.h"
#include "qaic_timesync.h"
#include "sahara.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
-#define PCI_DEV_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC080 0xa080
+#define PCI_DEVICE_ID_QCOM_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC200 0xa110
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0
+struct qaic_device_config {
+ /* Indicates the AIC family the device belongs to */
+ int family;
+ /* A bitmask representing the available BARs */
+ int bar_mask;
+ /* An index value used to identify the MHI controller BAR */
+ unsigned int mhi_bar_idx;
+ /* An index value used to identify the DBCs BAR */
+ unsigned int dbc_bar_idx;
+};
+
+static const struct qaic_device_config aic080_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic100_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic200_config = {
+ .family = FAMILY_AIC200,
+ .bar_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 1,
+ .dbc_bar_idx = 2,
+};
+
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
@@ -53,12 +89,12 @@ static void qaicm_wq_release(struct drm_device *dev, void *res)
destroy_workqueue(wq);
}
-static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
struct workqueue_struct *wq;
int ret;
- wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
+ wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
if (!wq)
return ERR_PTR(-ENOMEM);
ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
@@ -207,7 +243,6 @@ static const struct drm_driver qaic_accel_driver = {
.name = QAIC_NAME,
.desc = QAIC_DESC,
- .date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
@@ -236,6 +271,13 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
return ret;
}
+ ret = qaic_sysfs_init(qddev);
+ if (ret) {
+ drm_dev_unregister(drm);
+ pci_dbg(qdev->pdev, "qaic_sysfs_init failed %d\n", ret);
+ return ret;
+ }
+
qaic_debugfs_init(qddev);
return ret;
@@ -247,6 +289,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
+ qaic_sysfs_remove(qddev);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -348,11 +391,13 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
qaic_notify_reset(qdev);
/* start tearing things down */
+ qaic_clean_up_ssr(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
}
-static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct qaic_device *create_qdev(struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
@@ -365,12 +410,10 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC100) {
- qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
- if (!qdev->dbc)
- return NULL;
- }
+ qdev->num_dbc = 16;
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ if (!qdev->dbc)
+ return NULL;
qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
if (IS_ERR(qddev))
@@ -398,11 +441,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
if (IS_ERR(qdev->qts_wq))
return NULL;
+ qdev->ssr_wq = qaicm_wq_init(drm, "qaic_ssr");
+ if (IS_ERR(qdev->ssr_wq))
+ return NULL;
ret = qaicm_srcu_init(drm, &qdev->dev_lock);
if (ret)
return NULL;
+ ret = qaic_ssr_init(qdev, drm);
+ if (ret)
+ pci_info(pdev, "QAIC SSR crashdump collection not supported.\n");
+
qdev->qddev = qddev;
qdev->pdev = pdev;
qddev->qdev = qdev;
@@ -421,22 +471,26 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+ ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
+ if (ret)
+ return NULL;
}
return qdev;
}
-static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
+static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
int bars;
int ret;
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
/* make sure the device has the expected BARs */
- if (bars != (BIT(0) | BIT(2) | BIT(4))) {
- pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
- __func__, bars);
+ if (bars != config->bar_mask) {
+ pci_dbg(pdev, "%s: expected BARs %#x not found in device. Found %#x\n",
+ __func__, config->bar_mask, bars);
return -EINVAL;
}
@@ -447,17 +501,15 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
- ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (ret)
- return ret;
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
- if (IS_ERR(qdev->bar_0))
- return PTR_ERR(qdev->bar_0);
+ qdev->bar_mhi = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->mhi_bar_idx]);
+ if (IS_ERR(qdev->bar_mhi))
+ return PTR_ERR(qdev->bar_mhi);
- qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
- if (IS_ERR(qdev->bar_2))
- return PTR_ERR(qdev->bar_2);
+ qdev->bar_dbc = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->dbc_bar_idx]);
+ if (IS_ERR(qdev->bar_dbc))
+ return PTR_ERR(qdev->bar_dbc);
/* Managed release since we use pcim_enable_device above */
pci_set_master(pdev);
@@ -467,14 +519,15 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
+ int irq_count = qdev->num_dbc + 1;
int mhi_irq;
int ret;
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, irq_count, irq_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret == -ENOSPC) {
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0)
return ret;
@@ -487,7 +540,8 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
* interrupted, it shouldn't race with itself.
*/
qdev->single_msi = true;
- pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ pci_info(pdev, "Allocating %d MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n",
+ irq_count);
} else if (ret < 0) {
return ret;
}
@@ -508,7 +562,7 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
if (!qdev->single_msi)
disable_irq_nosync(qdev->dbc[i].irq);
- INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
+ INIT_WORK(&qdev->dbc[i].poll_work, qaic_irq_polling_work);
}
}
@@ -517,21 +571,22 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct qaic_device_config *config = (struct qaic_device_config *)id->driver_data;
struct qaic_device *qdev;
int mhi_irq;
int ret;
int i;
- qdev = create_qdev(pdev, id);
+ qdev = create_qdev(pdev, config);
if (!qdev)
return -ENOMEM;
- ret = init_pci(qdev, pdev);
+ ret = init_pci(qdev, pdev, config);
if (ret)
return ret;
for (i = 0; i < qdev->num_dbc; ++i)
- qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
+ qdev->dbc[i].dbc_base = qdev->bar_dbc + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
if (mhi_irq < 0)
@@ -541,8 +596,8 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
- qdev->single_msi);
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_mhi, mhi_irq,
+ qdev->single_msi, config->family);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
@@ -609,7 +664,9 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
+ { PCI_DEVICE_DATA(QCOM, AIC080, (kernel_ulong_t)&aic080_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC100, (kernel_ulong_t)&aic100_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC200, (kernel_ulong_t)&aic200_config), },
{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);
@@ -620,6 +677,92 @@ static const struct pci_error_handlers qaic_pci_err_handler = {
.reset_done = qaic_pci_reset_done,
};
+static bool qaic_is_under_reset(struct qaic_device *qdev)
+{
+ int rcu_id;
+ bool ret;
+
+ rcu_id = srcu_read_lock(&qdev->dev_lock);
+ ret = qdev->dev_state != QAIC_ONLINE;
+ srcu_read_unlock(&qdev->dev_lock, rcu_id);
+ return ret;
+}
+
+static bool qaic_data_path_busy(struct qaic_device *qdev)
+{
+ bool ret = false;
+ int dev_rcu_id;
+ int i;
+
+ dev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->dev_state != QAIC_ONLINE) {
+ srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
+ return false;
+ }
+ for (i = 0; i < qdev->num_dbc; i++) {
+ struct dma_bridge_chan *dbc = &qdev->dbc[i];
+ unsigned long flags;
+ int ch_rcu_id;
+
+ ch_rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (!dbc->usr || !dbc->in_use) {
+ srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
+ continue;
+ }
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ ret = !list_empty(&dbc->xfer_list);
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
+ return ret;
+}
+
+static int qaic_pm_suspend(struct device *dev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ dev_dbg(dev, "Suspending..\n");
+ if (qaic_data_path_busy(qdev)) {
+ dev_dbg(dev, "Device's datapath is busy. Aborting suspend..\n");
+ return -EBUSY;
+ }
+ if (qaic_is_under_reset(qdev)) {
+ dev_dbg(dev, "Device is under reset. Aborting suspend..\n");
+ return -EBUSY;
+ }
+ qaic_mqts_ch_stop_timer(qdev->mqts_ch);
+ qaic_pci_reset_prepare(qdev->pdev);
+ pci_save_state(qdev->pdev);
+ pci_disable_device(qdev->pdev);
+ pci_set_power_state(qdev->pdev, PCI_D3hot);
+ return 0;
+}
+
+static int qaic_pm_resume(struct device *dev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ dev_dbg(dev, "Resuming..\n");
+ pci_set_power_state(qdev->pdev, PCI_D0);
+ pci_restore_state(qdev->pdev);
+ ret = pci_enable_device(qdev->pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device failed on resume %d\n", ret);
+ return ret;
+ }
+ pci_set_master(qdev->pdev);
+ qaic_pci_reset_done(qdev->pdev);
+ return 0;
+}
+
+static const struct dev_pm_ops qaic_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(qaic_pm_suspend, qaic_pm_resume)
+};
+
static struct pci_driver qaic_pci_driver = {
.name = QAIC_NAME,
.id_table = qaic_ids,
@@ -627,6 +770,9 @@ static struct pci_driver qaic_pci_driver = {
.remove = qaic_pci_remove,
.shutdown = qaic_pci_shutdown,
.err_handler = &qaic_pci_err_handler,
+ .driver = {
+ .pm = pm_sleep_ptr(&qaic_pm_ops),
+ },
};
static int __init qaic_init(void)
@@ -659,8 +805,19 @@ static int __init qaic_init(void)
if (ret)
pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+ ret = qaic_ras_register();
+ if (ret)
+ pr_debug("qaic: qaic_ras_register failed %d\n", ret);
+ ret = qaic_ssr_register();
+ if (ret) {
+ pr_debug("qaic: qaic_ssr_register failed %d\n", ret);
+ goto free_bootlog;
+ }
+
return 0;
+free_bootlog:
+ qaic_bootlog_unregister();
free_mhi:
mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
@@ -686,6 +843,8 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_ssr_unregister();
+ qaic_ras_unregister();
qaic_bootlog_unregister();
qaic_timesync_deinit();
sahara_unregister();
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
new file mode 100644
index 000000000000..f1d52a710136
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+
+#include "qaic.h"
+#include "qaic_ras.h"
+
+#define MAGIC 0x55AA
+#define VERSION 0x2
+#define HDR_SZ 12
+#define NUM_TEMP_LVL 3
+#define POWER_BREAK BIT(0)
+
+enum msg_type {
+ MSG_PUSH, /* async push from device */
+ MSG_REQ, /* sync request to device */
+ MSG_RESP, /* sync response from device */
+};
+
+enum err_type {
+ CE, /* correctable error */
+ UE, /* uncorrectable error */
+ UE_NF, /* uncorrectable error that is non-fatal, expect a disruption */
+ ERR_TYPE_MAX,
+};
+
+static const char * const err_type_str[] = {
+ [CE] = "Correctable",
+ [UE] = "Uncorrectable",
+ [UE_NF] = "Uncorrectable Non-Fatal",
+};
+
+static const char * const err_class_str[] = {
+ [CE] = "Warning",
+ [UE] = "Fatal",
+ [UE_NF] = "Warning",
+};
+
+enum err_source {
+ SOC_MEM,
+ PCIE,
+ DDR,
+ SYS_BUS1,
+ SYS_BUS2,
+ NSP_MEM,
+ TSENS,
+};
+
+static const char * const err_src_str[TSENS + 1] = {
+ [SOC_MEM] = "SoC Memory",
+ [PCIE] = "PCIE",
+ [DDR] = "DDR",
+ [SYS_BUS1] = "System Bus source 1",
+ [SYS_BUS2] = "System Bus source 2",
+ [NSP_MEM] = "NSP Memory",
+ [TSENS] = "Temperature Sensors",
+};
+
+struct ras_data {
+ /* header start */
+ /* Magic number to validate the message */
+ u16 magic;
+ /* RAS version number */
+ u16 ver;
+ u32 seq_num;
+ /* RAS message type */
+ u8 type;
+ u8 id;
+ /* Size of RAS message without the header in bytes */
+ u16 len;
+ /* header end */
+ s32 result;
+ /*
+ * Error source
+ * 0 : SoC Memory
+ * 1 : PCIE
+ * 2 : DDR
+ * 3 : System Bus source 1
+ * 4 : System Bus source 2
+ * 5 : NSP Memory
+ * 6 : Temperature Sensors
+ */
+ u32 source;
+ /*
+ * Stores the error type, there are three types of error in RAS
+ * 0 : correctable error (CE)
+ * 1 : uncorrectable error (UE)
+ * 2 : uncorrectable error that is non-fatal (UE_NF)
+ */
+ u32 err_type;
+ u32 err_threshold;
+ u32 ce_count;
+ u32 ue_count;
+ u32 intr_num;
+ /* Data specific to error source */
+ u8 syndrome[64];
+} __packed;
+
+struct soc_mem_syndrome {
+ u64 error_address[8];
+} __packed;
+
+struct nsp_mem_syndrome {
+ u32 error_address[8];
+ u8 nsp_id;
+} __packed;
+
+struct ddr_syndrome {
+ u32 count;
+ u32 irq_status;
+ u32 data_31_0[2];
+ u32 data_63_32[2];
+ u32 data_95_64[2];
+ u32 data_127_96[2];
+ u32 addr_lsb;
+ u16 addr_msb;
+ u16 parity_bits;
+ u16 instance;
+ u16 err_type;
+} __packed;
+
+struct tsens_syndrome {
+ u32 threshold_type;
+ s32 temp;
+} __packed;
+
+struct sysbus1_syndrome {
+ u32 slave;
+ u32 err_type;
+ u16 addr[8];
+ u8 instance;
+} __packed;
+
+struct sysbus2_syndrome {
+ u32 lsb3;
+ u32 msb3;
+ u32 lsb2;
+ u32 msb2;
+ u32 ext_id;
+ u16 path;
+ u16 op_type;
+ u16 len;
+ u16 redirect;
+ u8 valid;
+ u8 word_error;
+ u8 non_secure;
+ u8 opc;
+ u8 error_code;
+ u8 trans_type;
+ u8 addr_space;
+ u8 instance;
+} __packed;
+
+struct pcie_syndrome {
+ /* CE info */
+ u32 bad_tlp;
+ u32 bad_dllp;
+ u32 replay_rollover;
+ u32 replay_timeout;
+ u32 rx_err;
+ u32 internal_ce_count;
+ /* UE_NF info */
+ u32 fc_timeout;
+ u32 poison_tlp;
+ u32 ecrc_err;
+ u32 unsupported_req;
+ u32 completer_abort;
+ u32 completion_timeout;
+ /* UE info */
+ u32 addr;
+ u8 index;
+ /*
+ * Flag to indicate specific event of PCIe
+ * BIT(0): Power break (low power)
+ * BIT(1) to BIT(7): Reserved
+ */
+ u8 flag;
+} __packed;
+
+static const char * const threshold_type_str[NUM_TEMP_LVL] = {
+ [0] = "lower",
+ [1] = "upper",
+ [2] = "critical",
+};
+
+static void ras_msg_to_cpu(struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ int i;
+
+ le16_to_cpus(&msg->magic);
+ le16_to_cpus(&msg->ver);
+ le32_to_cpus(&msg->seq_num);
+ le16_to_cpus(&msg->len);
+ le32_to_cpus(&msg->result);
+ le32_to_cpus(&msg->source);
+ le32_to_cpus(&msg->err_type);
+ le32_to_cpus(&msg->err_threshold);
+ le32_to_cpus(&msg->ce_count);
+ le32_to_cpus(&msg->ue_count);
+ le32_to_cpus(&msg->intr_num);
+
+ switch (msg->source) {
+ case SOC_MEM:
+ for (i = 0; i < 8; i++)
+ le64_to_cpus(&soc_syndrome->error_address[i]);
+ break;
+ case PCIE:
+ le32_to_cpus(&pcie_syndrome->bad_tlp);
+ le32_to_cpus(&pcie_syndrome->bad_dllp);
+ le32_to_cpus(&pcie_syndrome->replay_rollover);
+ le32_to_cpus(&pcie_syndrome->replay_timeout);
+ le32_to_cpus(&pcie_syndrome->rx_err);
+ le32_to_cpus(&pcie_syndrome->internal_ce_count);
+ le32_to_cpus(&pcie_syndrome->fc_timeout);
+ le32_to_cpus(&pcie_syndrome->poison_tlp);
+ le32_to_cpus(&pcie_syndrome->ecrc_err);
+ le32_to_cpus(&pcie_syndrome->unsupported_req);
+ le32_to_cpus(&pcie_syndrome->completer_abort);
+ le32_to_cpus(&pcie_syndrome->completion_timeout);
+ le32_to_cpus(&pcie_syndrome->addr);
+ break;
+ case DDR:
+ le16_to_cpus(&ddr_syndrome->instance);
+ le16_to_cpus(&ddr_syndrome->err_type);
+ le32_to_cpus(&ddr_syndrome->count);
+ le32_to_cpus(&ddr_syndrome->irq_status);
+ le32_to_cpus(&ddr_syndrome->data_31_0[0]);
+ le32_to_cpus(&ddr_syndrome->data_31_0[1]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[0]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[1]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[0]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[1]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[0]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[1]);
+ le16_to_cpus(&ddr_syndrome->parity_bits);
+ le16_to_cpus(&ddr_syndrome->addr_msb);
+ le32_to_cpus(&ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ le32_to_cpus(&sysbus1_syndrome->slave);
+ le32_to_cpus(&sysbus1_syndrome->err_type);
+ for (i = 0; i < 8; i++)
+ le16_to_cpus(&sysbus1_syndrome->addr[i]);
+ break;
+ case SYS_BUS2:
+ le16_to_cpus(&sysbus2_syndrome->op_type);
+ le16_to_cpus(&sysbus2_syndrome->len);
+ le16_to_cpus(&sysbus2_syndrome->redirect);
+ le16_to_cpus(&sysbus2_syndrome->path);
+ le32_to_cpus(&sysbus2_syndrome->ext_id);
+ le32_to_cpus(&sysbus2_syndrome->lsb2);
+ le32_to_cpus(&sysbus2_syndrome->msb2);
+ le32_to_cpus(&sysbus2_syndrome->lsb3);
+ le32_to_cpus(&sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ for (i = 0; i < 8; i++)
+ le32_to_cpus(&nsp_syndrome->error_address[i]);
+ break;
+ case TSENS:
+ le32_to_cpus(&tsens_syndrome->threshold_type);
+ le32_to_cpus(&tsens_syndrome->temp);
+ break;
+ }
+}
+
+static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ char *class;
+ char *level;
+
+ if (msg->magic != MAGIC) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid magic %x\n", msg->magic);
+ return;
+ }
+
+ if (!msg->ver || msg->ver > VERSION) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid version %d\n", msg->ver);
+ return;
+ }
+
+ if (msg->type != MSG_PUSH) {
+ pci_warn(qdev->pdev, "Dropping non-PUSH RAS message\n");
+ return;
+ }
+
+ if (msg->len != sizeof(*msg) - HDR_SZ) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid len %d\n", msg->len);
+ return;
+ }
+
+ if (msg->err_type >= ERR_TYPE_MAX) {
+ pci_warn(qdev->pdev, "Dropping RAS message with err type %d\n", msg->err_type);
+ return;
+ }
+
+ if (msg->err_type == UE)
+ level = KERN_ERR;
+ else
+ level = KERN_WARNING;
+
+ switch (msg->source) {
+ case SOC_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ soc_syndrome->error_address[0],
+ soc_syndrome->error_address[1],
+ soc_syndrome->error_address[2],
+ soc_syndrome->error_address[3],
+ soc_syndrome->error_address[4],
+ soc_syndrome->error_address[5],
+ soc_syndrome->error_address[6],
+ soc_syndrome->error_address[7]);
+ break;
+ case PCIE:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold);
+
+ switch (msg->err_type) {
+ case CE:
+ /*
+ * Modeled after AER prints. This continues the dev_printk() from a few
+ * lines up. We reduce duplication of code, but also avoid re-printing the
+ * PCI device info so that the end result looks uniform to the log user.
+ */
+ printk(KERN_WARNING pr_fmt("Syndrome:\n Bad TLP count %d\n Bad DLLP count %d\n Replay Rollover count %d\n Replay Timeout count %d\n Recv Error count %d\n Internal CE count %d\n"),
+ pcie_syndrome->bad_tlp,
+ pcie_syndrome->bad_dllp,
+ pcie_syndrome->replay_rollover,
+ pcie_syndrome->replay_timeout,
+ pcie_syndrome->rx_err,
+ pcie_syndrome->internal_ce_count);
+ if (msg->ver > 0x1)
+ pr_warn(" Power break %s\n",
+ pcie_syndrome->flag & POWER_BREAK ? "ON" : "OFF");
+ break;
+ case UE:
+ printk(KERN_ERR pr_fmt("Syndrome:\n Index %d\n Address 0x%x\n"),
+ pcie_syndrome->index, pcie_syndrome->addr);
+ break;
+ case UE_NF:
+ printk(KERN_WARNING pr_fmt("Syndrome:\n FC timeout count %d\n Poisoned TLP count %d\n ECRC error count %d\n Unsupported request count %d\n Completer abort count %d\n Completion timeout count %d\n"),
+ pcie_syndrome->fc_timeout,
+ pcie_syndrome->poison_tlp,
+ pcie_syndrome->ecrc_err,
+ pcie_syndrome->unsupported_req,
+ pcie_syndrome->completer_abort,
+ pcie_syndrome->completion_timeout);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DDR:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n Instance %d\n Count %d\n Data 31_0 0x%x 0x%x\n Data 63_32 0x%x 0x%x\n Data 95_64 0x%x 0x%x\n Data 127_96 0x%x 0x%x\n Parity bits 0x%x\n Address msb 0x%x\n Address lsb 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ ddr_syndrome->instance,
+ ddr_syndrome->count,
+ ddr_syndrome->data_31_0[1],
+ ddr_syndrome->data_31_0[0],
+ ddr_syndrome->data_63_32[1],
+ ddr_syndrome->data_63_32[0],
+ ddr_syndrome->data_95_64[1],
+ ddr_syndrome->data_95_64[0],
+ ddr_syndrome->data_127_96[1],
+ ddr_syndrome->data_127_96[0],
+ ddr_syndrome->parity_bits,
+ ddr_syndrome->addr_msb,
+ ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n %s\n err_type %d\n address0 0x%x\n address1 0x%x\n address2 0x%x\n address3 0x%x\n address4 0x%x\n address5 0x%x\n address6 0x%x\n address7 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus1_syndrome->instance,
+ sysbus1_syndrome->slave ? "Slave" : "Master",
+ sysbus1_syndrome->err_type,
+ sysbus1_syndrome->addr[0],
+ sysbus1_syndrome->addr[1],
+ sysbus1_syndrome->addr[2],
+ sysbus1_syndrome->addr[3],
+ sysbus1_syndrome->addr[4],
+ sysbus1_syndrome->addr[5],
+ sysbus1_syndrome->addr[6],
+ sysbus1_syndrome->addr[7]);
+ break;
+ case SYS_BUS2:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n valid %d\n word error %d\n non-secure %d\n opc %d\n error code %d\n transaction type %d\n address space %d\n operation type %d\n len %d\n redirect %d\n path %d\n ext_id %d\n lsb2 %d\n msb2 %d\n lsb3 %d\n msb3 %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus2_syndrome->instance,
+ sysbus2_syndrome->valid,
+ sysbus2_syndrome->word_error,
+ sysbus2_syndrome->non_secure,
+ sysbus2_syndrome->opc,
+ sysbus2_syndrome->error_code,
+ sysbus2_syndrome->trans_type,
+ sysbus2_syndrome->addr_space,
+ sysbus2_syndrome->op_type,
+ sysbus2_syndrome->len,
+ sysbus2_syndrome->redirect,
+ sysbus2_syndrome->path,
+ sysbus2_syndrome->ext_id,
+ sysbus2_syndrome->lsb2,
+ sysbus2_syndrome->msb2,
+ sysbus2_syndrome->lsb3,
+ sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n NSP ID %d\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ nsp_syndrome->nsp_id,
+ nsp_syndrome->error_address[0],
+ nsp_syndrome->error_address[1],
+ nsp_syndrome->error_address[2],
+ nsp_syndrome->error_address[3],
+ nsp_syndrome->error_address[4],
+ nsp_syndrome->error_address[5],
+ nsp_syndrome->error_address[6],
+ nsp_syndrome->error_address[7]);
+ break;
+ case TSENS:
+ if (tsens_syndrome->threshold_type >= NUM_TEMP_LVL) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid temp threshold %d\n",
+ tsens_syndrome->threshold_type);
+ break;
+ }
+
+ if (msg->err_type)
+ class = "Fatal";
+ else if (tsens_syndrome->threshold_type)
+ class = "Critical";
+ else
+ class = "Warning";
+
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n %s threshold\n %d deg C\n",
+ class,
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ threshold_type_str[tsens_syndrome->threshold_type],
+ tsens_syndrome->temp);
+ break;
+ }
+
+ /* Uncorrectable errors are fatal */
+ if (msg->err_type == UE)
+ mhi_soc_reset(qdev->mhi_cntrl);
+
+ switch (msg->err_type) {
+ case CE:
+ if (qdev->ce_count != UINT_MAX)
+ qdev->ce_count++;
+ break;
+ case UE:
+ if (qdev->ue_count != UINT_MAX)
+ qdev->ue_count++;
+ break;
+ case UE_NF:
+ if (qdev->ue_nf_count != UINT_MAX)
+ qdev->ue_nf_count++;
+ break;
+ default:
+ /* not possible */
+ break;
+ }
+}
+
+static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return sysfs_emit(buf, "%d\n", qdev->ce_count);
+}
+
+static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return sysfs_emit(buf, "%d\n", qdev->ue_count);
+}
+
+static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return sysfs_emit(buf, "%d\n", qdev->ue_nf_count);
+}
+
+static DEVICE_ATTR_RO(ce_count);
+static DEVICE_ATTR_RO(ue_count);
+static DEVICE_ATTR_RO(ue_nonfatal_count);
+
+static struct attribute *ras_attrs[] = {
+ &dev_attr_ce_count.attr,
+ &dev_attr_ue_count.attr,
+ &dev_attr_ue_nonfatal_count.attr,
+ NULL,
+};
+
+static struct attribute_group ras_group = {
+ .attrs = ras_attrs,
+};
+
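/*
 * Usage sketch, not part of this patch: once qaic_ras_mhi_probe() attaches
 * ras_group to the PCI device, the counters are readable from the device's
 * sysfs directory. A minimal userspace reader; the BDF "0000:01:00.0" is a
 * placeholder and read_ras_count() is a hypothetical helper.
 */
#include <stdio.h>

static unsigned int read_ras_count(const char *attr)
{
	char path[128];
	unsigned int count = 0;
	FILE *f;

	/* attr is one of "ce_count", "ue_count", "ue_nonfatal_count" */
	snprintf(path, sizeof(path), "/sys/bus/pci/devices/0000:01:00.0/%s",
		 attr);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%u", &count) != 1)
		count = 0;
	fclose(f);
	return count;
}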
+static int qaic_ras_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ras_data *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp, sizeof(*resp), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ ret = device_add_group(&qdev->pdev->dev, &ras_group);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ pci_dbg(qdev->pdev, "ras add sysfs failed %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ras_ch = mhi_dev;
+
+ return ret;
+}
+
+static void qaic_ras_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ qdev->ras_ch = NULL;
+ device_remove_group(&qdev->pdev->dev, &ras_group);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void qaic_ras_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) {}
+
+static void qaic_ras_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ras_data *msg = mhi_result->buf_addr;
+ int ret;
+
+ if (mhi_result->transaction_status) {
+ kfree(msg);
+ return;
+ }
+
+ ras_msg_to_cpu(msg);
+ decode_ras_msg(qdev, msg);
+
+ ret = mhi_queue_buf(qdev->ras_ch, DMA_FROM_DEVICE, msg, sizeof(*msg), MHI_EOT);
+ if (ret) {
+ dev_err(&mhi_dev->dev, "Cannot requeue RAS recv buf %d\n", ret);
+ kfree(msg);
+ }
+}
+
+static const struct mhi_device_id qaic_ras_mhi_match_table[] = {
+ { .chan = "QAIC_STATUS", },
+ {},
+};
+
+static struct mhi_driver qaic_ras_mhi_driver = {
+ .id_table = qaic_ras_mhi_match_table,
+ .remove = qaic_ras_mhi_remove,
+ .probe = qaic_ras_mhi_probe,
+ .ul_xfer_cb = qaic_ras_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ras_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ras",
+ },
+};
+
+int qaic_ras_register(void)
+{
+ return mhi_driver_register(&qaic_ras_mhi_driver);
+}
+
+void qaic_ras_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ras_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ras.h b/drivers/accel/qaic/qaic_ras.h
new file mode 100644
index 000000000000..d44a4eeeb060
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __QAIC_RAS_H__
+#define __QAIC_RAS_H__
+
+int qaic_ras_register(void);
+void qaic_ras_unregister(void);
+
+#endif /* __QAIC_RAS_H__ */
diff --git a/drivers/accel/qaic/qaic_ssr.c b/drivers/accel/qaic/qaic_ssr.c
new file mode 100644
index 000000000000..9b662d690371
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ssr.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <asm/byteorder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/workqueue.h>
+
+#include "qaic.h"
+#include "qaic_ssr.h"
+
+#define SSR_RESP_MSG_SZ 32
+#define SSR_MHI_BUF_SIZE SZ_64K
+#define SSR_MEM_READ_DATA_SIZE ((u64)SSR_MHI_BUF_SIZE - sizeof(struct ssr_crashdump))
+#define SSR_MEM_READ_CHUNK_SIZE ((u64)SSR_MEM_READ_DATA_SIZE - sizeof(struct ssr_memory_read_rsp))
+
+#define DEBUG_TRANSFER_INFO BIT(0)
+#define DEBUG_TRANSFER_INFO_RSP BIT(1)
+#define MEMORY_READ BIT(2)
+#define MEMORY_READ_RSP BIT(3)
+#define DEBUG_TRANSFER_DONE BIT(4)
+#define DEBUG_TRANSFER_DONE_RSP BIT(5)
+#define SSR_EVENT BIT(8)
+#define SSR_EVENT_RSP BIT(9)
+
+#define SSR_EVENT_NACK BIT(0)
+#define BEFORE_SHUTDOWN BIT(1)
+#define AFTER_SHUTDOWN BIT(2)
+#define BEFORE_POWER_UP BIT(3)
+#define AFTER_POWER_UP BIT(4)
+
+struct debug_info_table {
+ /* Save preferences. Default is mandatory */
+ u64 save_perf;
+ /* Base address of the debug region */
+ u64 mem_base;
+ /* Size of debug region in bytes */
+ u64 len;
+ /* Description */
+ char desc[20];
+ /* Filename of debug region */
+ char filename[20];
+};
+
+struct _ssr_hdr {
+ __le32 cmd;
+ __le32 len;
+ __le32 dbc_id;
+};
+
+struct ssr_hdr {
+ u32 cmd;
+ u32 len;
+ u32 dbc_id;
+};
+
+struct ssr_debug_transfer_info {
+ struct ssr_hdr hdr;
+ u32 resv;
+ u64 tbl_addr;
+ u64 tbl_len;
+} __packed;
+
+struct ssr_debug_transfer_info_rsp {
+ struct _ssr_hdr hdr;
+ __le32 ret;
+} __packed;
+
+struct ssr_memory_read {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+ __le64 addr;
+ __le64 len;
+} __packed;
+
+struct ssr_memory_read_rsp {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+ u8 data[];
+} __packed;
+
+struct ssr_debug_transfer_done {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+} __packed;
+
+struct ssr_debug_transfer_done_rsp {
+ struct _ssr_hdr hdr;
+ __le32 ret;
+} __packed;
+
+struct ssr_event {
+ struct ssr_hdr hdr;
+ u32 event;
+} __packed;
+
+struct ssr_event_rsp {
+ struct _ssr_hdr hdr;
+ __le32 event;
+} __packed;
+
+struct ssr_resp {
+ /* Work struct to schedule handling of data arriving on the QAIC_SSR channel */
+ struct work_struct work;
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Buffer used by MHI for transfer requests */
+ u8 data[] __aligned(8);
+};
+
+/* SSR crashdump bookkeeping structure */
+struct ssr_dump_info {
+ /* DBC associated with this SSR crashdump */
+ struct dma_bridge_chan *dbc;
+ /*
+ * Used once the crashdump download completes and we switch back to
+ * waiting on SSR events
+ */
+ struct ssr_resp *resp;
+ /* MEMORY READ request MHI buffer */
+ struct ssr_memory_read *read_buf_req;
+ /* True if ->read_buf_req is queued for an MHI transaction, false otherwise */
+ bool read_buf_req_queued;
+ /* Address of table in host */
+ void *tbl_addr;
+ /* Total size of table */
+ u64 tbl_len;
+ /* Offset within the table (->tbl_addr) where the next chunk will be written */
+ u64 tbl_off;
+ /* Address of table in device/target */
+ u64 tbl_addr_dev;
+ /* Ptr to the entire dump */
+ void *dump_addr;
+ /* Entire crashdump size */
+ u64 dump_sz;
+ /* Offset within the crashdump (->dump_addr) where the next chunk will be written */
+ u64 dump_off;
+ /* Points to the table entry we are currently downloading */
+ struct debug_info_table *tbl_ent;
+ /* Offset within the current table entry (->tbl_ent) for the next chunk */
+ u64 tbl_ent_off;
+};
+
+struct ssr_crashdump {
+ /*
+ * Points to a bookkeeping struct maintained by the MHI SSR device while
+ * downloading an SSR crashdump. It is NULL when no crashdump download is
+ * in progress.
+ */
+ struct ssr_dump_info *dump_info;
+ /* Work struct to schedule handling of data arriving on the QAIC_SSR channel */
+ struct work_struct work;
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Buffer used by MHI for transfer requests */
+ u8 data[];
+};
+
+#define QAIC_SSR_DUMP_V1_MAGIC 0x1234567890abcdef
+#define QAIC_SSR_DUMP_V1_VER 1
+struct dump_file_meta {
+ u64 magic;
+ u64 version;
+ u64 size; /* Total size of the entire dump */
+ u64 tbl_len; /* Length of the table in bytes */
+};
+
+/*
+ * Layout of crashdump
+ * +------------------------------------------+
+ * | Crashdump Meta structure |
+ * | type: struct dump_file_meta |
+ * +------------------------------------------+
+ * | Crashdump Table |
+ * | type: array of struct debug_info_table |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ * | Crashdump |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ */
+
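/*
 * Consumer sketch, not part of this patch: userspace reading the resulting
 * devcoredump could validate the blob against the layout above before
 * parsing it. qaic_ssr_dump_valid() is a hypothetical helper; the struct
 * and macro names mirror the driver's but would live in a userspace copy.
 */
static bool qaic_ssr_dump_valid(const struct dump_file_meta *meta, u64 size)
{
	if (size < sizeof(*meta))
		return false;
	if (meta->magic != QAIC_SSR_DUMP_V1_MAGIC ||
	    meta->version != QAIC_SSR_DUMP_V1_VER)
		return false;
	/* The recorded sizes must agree with the blob that was received */
	if (meta->size != size ||
	    meta->tbl_len % sizeof(struct debug_info_table) ||
	    sizeof(*meta) + meta->tbl_len > size)
		return false;
	return true;
}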
+static void free_ssr_dump_info(struct ssr_crashdump *ssr_crash)
+{
+ struct ssr_dump_info *dump_info = ssr_crash->dump_info;
+
+ ssr_crash->dump_info = NULL;
+ if (!dump_info)
+ return;
+ if (!dump_info->read_buf_req_queued)
+ kfree(dump_info->read_buf_req);
+ vfree(dump_info->tbl_addr);
+ vfree(dump_info->dump_addr);
+ kfree(dump_info);
+}
+
+void qaic_clean_up_ssr(struct qaic_device *qdev)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+
+ if (!ssr_crash)
+ return;
+
+ qaic_dbc_exit_ssr(qdev);
+ free_ssr_dump_info(ssr_crash);
+}
+
+static int alloc_dump(struct ssr_dump_info *dump_info)
+{
+ struct debug_info_table *tbl_ent = dump_info->tbl_addr;
+ struct dump_file_meta *dump_meta;
+ u64 tbl_sz_lp = 0;
+ u64 dump_size = 0;
+
+ while (tbl_sz_lp < dump_info->tbl_len) {
+ le64_to_cpus(&tbl_ent->save_perf);
+ le64_to_cpus(&tbl_ent->mem_base);
+ le64_to_cpus(&tbl_ent->len);
+
+ if (tbl_ent->len == 0)
+ return -EINVAL;
+
+ dump_size += tbl_ent->len;
+ tbl_ent++;
+ tbl_sz_lp += sizeof(*tbl_ent);
+ }
+
+ dump_info->dump_sz = dump_size + dump_info->tbl_len + sizeof(*dump_meta);
+ dump_info->dump_addr = vzalloc(dump_info->dump_sz);
+ if (!dump_info->dump_addr)
+ return -ENOMEM;
+
+ /* Copy crashdump meta and table */
+ dump_meta = dump_info->dump_addr;
+ dump_meta->magic = QAIC_SSR_DUMP_V1_MAGIC;
+ dump_meta->version = QAIC_SSR_DUMP_V1_VER;
+ dump_meta->size = dump_info->dump_sz;
+ dump_meta->tbl_len = dump_info->tbl_len;
+ memcpy(dump_info->dump_addr + sizeof(*dump_meta), dump_info->tbl_addr, dump_info->tbl_len);
+ /* Offset by crashdump meta and table (copied above) */
+ dump_info->dump_off = dump_info->tbl_len + sizeof(*dump_meta);
+
+ return 0;
+}
+
+static int send_xfer_done(struct qaic_device *qdev, void *resp, u32 dbc_id)
+{
+ struct ssr_debug_transfer_done *xfer_done;
+ int ret;
+
+ xfer_done = kmalloc(sizeof(*xfer_done), GFP_KERNEL);
+ if (!xfer_done) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret)
+ goto free_xfer_done;
+
+ xfer_done->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_DONE);
+ xfer_done->hdr.len = cpu_to_le32(sizeof(*xfer_done));
+ xfer_done->hdr.dbc_id = cpu_to_le32(dbc_id);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, xfer_done, sizeof(*xfer_done), MHI_EOT);
+ if (ret)
+ goto free_xfer_done;
+
+ return 0;
+
+free_xfer_done:
+ kfree(xfer_done);
+out:
+ return ret;
+}
+
+static int mem_read_req(struct qaic_device *qdev, u64 dest_addr, u64 dest_len)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ struct ssr_memory_read *read_buf_req;
+ struct ssr_dump_info *dump_info;
+ int ret;
+
+ dump_info = ssr_crash->dump_info;
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, ssr_crash->data, SSR_MEM_READ_DATA_SIZE,
+ MHI_EOT);
+ if (ret)
+ goto out;
+
+ read_buf_req = dump_info->read_buf_req;
+ read_buf_req->hdr.cmd = cpu_to_le32(MEMORY_READ);
+ read_buf_req->hdr.len = cpu_to_le32(sizeof(*read_buf_req));
+ read_buf_req->hdr.dbc_id = cpu_to_le32(qdev->ssr_dbc);
+ read_buf_req->addr = cpu_to_le64(dest_addr);
+ read_buf_req->len = cpu_to_le64(dest_len);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, read_buf_req, sizeof(*read_buf_req),
+ MHI_EOT);
+ if (!ret)
+ dump_info->read_buf_req_queued = true;
+
+out:
+ return ret;
+}
+
+static int ssr_copy_table(struct ssr_dump_info *dump_info, void *data, u64 len)
+{
+ if (len > dump_info->tbl_len - dump_info->tbl_off)
+ return -EINVAL;
+
+ memcpy(dump_info->tbl_addr + dump_info->tbl_off, data, len);
+ dump_info->tbl_off += len;
+
+ /* Entire table has been downloaded, alloc dump memory */
+ if (dump_info->tbl_off == dump_info->tbl_len) {
+ dump_info->tbl_ent = dump_info->tbl_addr;
+ return alloc_dump(dump_info);
+ }
+
+ return 0;
+}
+
+static int ssr_copy_dump(struct ssr_dump_info *dump_info, void *data, u64 len)
+{
+ struct debug_info_table *tbl_ent;
+
+ tbl_ent = dump_info->tbl_ent;
+
+ if (len > tbl_ent->len - dump_info->tbl_ent_off)
+ return -EINVAL;
+
+ memcpy(dump_info->dump_addr + dump_info->dump_off, data, len);
+ dump_info->dump_off += len;
+ dump_info->tbl_ent_off += len;
+
+ /*
+ * The current segment (an entry in the table) of the crashdump is
+ * complete, move to the next one
+ */
+ if (tbl_ent->len == dump_info->tbl_ent_off) {
+ dump_info->tbl_ent++;
+ dump_info->tbl_ent_off = 0;
+ }
+
+ return 0;
+}
+
+static void ssr_dump_worker(struct work_struct *work)
+{
+ struct ssr_crashdump *ssr_crash = container_of(work, struct ssr_crashdump, work);
+ struct qaic_device *qdev = ssr_crash->qdev;
+ struct ssr_memory_read_rsp *mem_rd_resp;
+ struct debug_info_table *tbl_ent;
+ struct ssr_dump_info *dump_info;
+ u64 dest_addr, dest_len;
+ struct _ssr_hdr *_hdr;
+ struct ssr_hdr hdr;
+ u64 data_len;
+ int ret;
+
+ mem_rd_resp = (struct ssr_memory_read_rsp *)ssr_crash->data;
+ _hdr = &mem_rd_resp->hdr;
+ hdr.cmd = le32_to_cpu(_hdr->cmd);
+ hdr.len = le32_to_cpu(_hdr->len);
+ hdr.dbc_id = le32_to_cpu(_hdr->dbc_id);
+
+ if (hdr.dbc_id != qdev->ssr_dbc)
+ goto reset_device;
+
+ dump_info = ssr_crash->dump_info;
+ if (!dump_info)
+ goto reset_device;
+
+ if (hdr.cmd != MEMORY_READ_RSP)
+ goto free_dump_info;
+
+ if (hdr.len > SSR_MEM_READ_DATA_SIZE)
+ goto free_dump_info;
+
+ data_len = hdr.len - sizeof(*mem_rd_resp);
+
+ if (dump_info->tbl_off < dump_info->tbl_len) /* Chunk belongs to table */
+ ret = ssr_copy_table(dump_info, mem_rd_resp->data, data_len);
+ else /* Chunk belongs to crashdump */
+ ret = ssr_copy_dump(dump_info, mem_rd_resp->data, data_len);
+
+ if (ret)
+ goto free_dump_info;
+
+ if (dump_info->tbl_off < dump_info->tbl_len) {
+ /* Continue downloading table */
+ dest_addr = dump_info->tbl_addr_dev + dump_info->tbl_off;
+ dest_len = min(SSR_MEM_READ_CHUNK_SIZE, dump_info->tbl_len - dump_info->tbl_off);
+ ret = mem_read_req(qdev, dest_addr, dest_len);
+ } else if (dump_info->dump_off < dump_info->dump_sz) {
+ /* Continue downloading crashdump */
+ tbl_ent = dump_info->tbl_ent;
+ dest_addr = tbl_ent->mem_base + dump_info->tbl_ent_off;
+ dest_len = min(SSR_MEM_READ_CHUNK_SIZE, tbl_ent->len - dump_info->tbl_ent_off);
+ ret = mem_read_req(qdev, dest_addr, dest_len);
+ } else {
+ /* Crashdump download complete */
+ ret = send_xfer_done(qdev, dump_info->resp->data, hdr.dbc_id);
+ }
+
+ /* Most likely an MHI xfer has failed */
+ if (ret)
+ goto free_dump_info;
+
+ return;
+
+free_dump_info:
+ /* Free the allocated memory */
+ free_ssr_dump_info(ssr_crash);
+reset_device:
+ /*
+ * Crashdump collection began after a subsystem crashed on the device,
+ * but something went wrong while collecting the crashdump. Rather than
+ * trying to recover from the error, just reset the device; a best
+ * effort has been made
+ */
+ mhi_soc_reset(qdev->mhi_cntrl);
+}
+
+static struct ssr_dump_info *alloc_dump_info(struct qaic_device *qdev,
+ struct ssr_debug_transfer_info *debug_info)
+{
+ struct ssr_dump_info *dump_info;
+ int ret;
+
+ le64_to_cpus(&debug_info->tbl_len);
+ le64_to_cpus(&debug_info->tbl_addr);
+
+ if (debug_info->tbl_len == 0 ||
+ debug_info->tbl_len % sizeof(struct debug_info_table) != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Allocate SSR crashdump bookkeeping structure */
+ dump_info = kzalloc(sizeof(*dump_info), GFP_KERNEL);
+ if (!dump_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Buffer used to send MEMORY READ request to device via MHI */
+ dump_info->read_buf_req = kzalloc(sizeof(*dump_info->read_buf_req), GFP_KERNEL);
+ if (!dump_info->read_buf_req) {
+ ret = -ENOMEM;
+ goto free_dump_info;
+ }
+
+ /* Crashdump meta table buffer */
+ dump_info->tbl_addr = vzalloc(debug_info->tbl_len);
+ if (!dump_info->tbl_addr) {
+ ret = -ENOMEM;
+ goto free_read_buf_req;
+ }
+
+ dump_info->tbl_addr_dev = debug_info->tbl_addr;
+ dump_info->tbl_len = debug_info->tbl_len;
+
+ return dump_info;
+
+free_read_buf_req:
+ kfree(dump_info->read_buf_req);
+free_dump_info:
+ kfree(dump_info);
+out:
+ return ERR_PTR(ret);
+}
+
+static int dbg_xfer_info_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc,
+ struct ssr_debug_transfer_info *debug_info)
+{
+ struct ssr_debug_transfer_info_rsp *debug_rsp;
+ struct ssr_crashdump *ssr_crash = NULL;
+ int ret = 0, ret2;
+
+ debug_rsp = kmalloc(sizeof(*debug_rsp), GFP_KERNEL);
+ if (!debug_rsp)
+ return -ENOMEM;
+
+ if (!qdev->ssr_mhi_buf) {
+ ret = -ENOMEM;
+ goto send_rsp;
+ }
+
+ if (dbc->state != DBC_STATE_BEFORE_POWER_UP) {
+ ret = -EINVAL;
+ goto send_rsp;
+ }
+
+ ssr_crash = qdev->ssr_mhi_buf;
+ ssr_crash->dump_info = alloc_dump_info(qdev, debug_info);
+ if (IS_ERR(ssr_crash->dump_info)) {
+ ret = PTR_ERR(ssr_crash->dump_info);
+ ssr_crash->dump_info = NULL;
+ }
+
+send_rsp:
+ debug_rsp->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_INFO_RSP);
+ debug_rsp->hdr.len = cpu_to_le32(sizeof(*debug_rsp));
+ debug_rsp->hdr.dbc_id = cpu_to_le32(dbc->id);
+ /*
+ * 0 = Return an ACK confirming the host is ready to download the crashdump
+ * 1 = Return a NACK indicating the host is not ready to download the crashdump
+ */
+ debug_rsp->ret = cpu_to_le32(ret ? 1 : 0);
+
+ ret2 = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, debug_rsp, sizeof(*debug_rsp), MHI_EOT);
+ if (ret2) {
+ free_ssr_dump_info(ssr_crash);
+ kfree(debug_rsp);
+ return ret2;
+ }
+
+ return ret;
+}
+
+static void dbg_xfer_done_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc,
+ struct ssr_debug_transfer_done_rsp *xfer_rsp)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ u32 status = le32_to_cpu(xfer_rsp->ret);
+ struct device *dev = &qdev->pdev->dev;
+ struct ssr_dump_info *dump_info;
+
+ dump_info = ssr_crash->dump_info;
+ if (!dump_info)
+ return;
+
+ if (status) {
+ free_ssr_dump_info(ssr_crash);
+ return;
+ }
+
+ dev_coredumpv(dev, dump_info->dump_addr, dump_info->dump_sz, GFP_KERNEL);
+ /* dev_coredumpv will free dump_info->dump_addr */
+ dump_info->dump_addr = NULL;
+ free_ssr_dump_info(ssr_crash);
+}
+
+static void ssr_worker(struct work_struct *work)
+{
+ struct ssr_resp *resp = container_of(work, struct ssr_resp, work);
+ struct ssr_hdr *hdr = (struct ssr_hdr *)resp->data;
+ struct ssr_dump_info *dump_info = NULL;
+ struct qaic_device *qdev = resp->qdev;
+ struct ssr_crashdump *ssr_crash;
+ struct ssr_event_rsp *event_rsp;
+ struct dma_bridge_chan *dbc;
+ struct ssr_event *event;
+ u32 ssr_event_ack;
+ int ret;
+
+ le32_to_cpus(&hdr->cmd);
+ le32_to_cpus(&hdr->len);
+ le32_to_cpus(&hdr->dbc_id);
+
+ if (hdr->len > SSR_RESP_MSG_SZ)
+ goto out;
+
+ if (hdr->dbc_id >= qdev->num_dbc)
+ goto out;
+
+ dbc = &qdev->dbc[hdr->dbc_id];
+
+ switch (hdr->cmd) {
+ case DEBUG_TRANSFER_INFO:
+ ret = dbg_xfer_info_rsp(qdev, dbc, (struct ssr_debug_transfer_info *)resp->data);
+ if (ret)
+ break;
+
+ ssr_crash = qdev->ssr_mhi_buf;
+ dump_info = ssr_crash->dump_info;
+ dump_info->dbc = dbc;
+ dump_info->resp = resp;
+
+ /* Start by downloading debug table */
+ ret = mem_read_req(qdev, dump_info->tbl_addr_dev,
+ min(dump_info->tbl_len, SSR_MEM_READ_CHUNK_SIZE));
+ if (ret) {
+ free_ssr_dump_info(ssr_crash);
+ break;
+ }
+
+ /*
+ * Everything has gone fine so far, which means we will be
+ * collecting the crashdump chunk by chunk. Do not queue a response
+ * buffer for SSR cmds until the crashdump is complete.
+ */
+ return;
+ case SSR_EVENT:
+ event = (struct ssr_event *)hdr;
+ le32_to_cpus(&event->event);
+ ssr_event_ack = event->event;
+ ssr_crash = qdev->ssr_mhi_buf;
+
+ switch (event->event) {
+ case BEFORE_SHUTDOWN:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_SHUTDOWN);
+ qaic_dbc_enter_ssr(qdev, hdr->dbc_id);
+ break;
+ case AFTER_SHUTDOWN:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_SHUTDOWN);
+ break;
+ case BEFORE_POWER_UP:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_POWER_UP);
+ break;
+ case AFTER_POWER_UP:
+ /*
+ * If dump_info is non-NULL, this SSR event arrived
+ * while a crashdump download for this DBC is still
+ * in progress. NACK the SSR event
+ */
+ if (ssr_crash && ssr_crash->dump_info) {
+ free_ssr_dump_info(ssr_crash);
+ ssr_event_ack = SSR_EVENT_NACK;
+ break;
+ }
+
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_POWER_UP);
+ break;
+ default:
+ break;
+ }
+
+ event_rsp = kmalloc(sizeof(*event_rsp), GFP_KERNEL);
+ if (!event_rsp)
+ break;
+
+ event_rsp->hdr.cmd = cpu_to_le32(SSR_EVENT_RSP);
+ event_rsp->hdr.len = cpu_to_le32(sizeof(*event_rsp));
+ event_rsp->hdr.dbc_id = cpu_to_le32(hdr->dbc_id);
+ event_rsp->event = cpu_to_le32(ssr_event_ack);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, event_rsp, sizeof(*event_rsp),
+ MHI_EOT);
+ if (ret)
+ kfree(event_rsp);
+
+ if (event->event == AFTER_POWER_UP && ssr_event_ack != SSR_EVENT_NACK) {
+ qaic_dbc_exit_ssr(qdev);
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_IDLE);
+ }
+
+ break;
+ case DEBUG_TRANSFER_DONE_RSP:
+ dbg_xfer_done_rsp(qdev, dbc, (struct ssr_debug_transfer_done_rsp *)hdr);
+ break;
+ default:
+ break;
+ }
+
+out:
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret)
+ kfree(resp);
+}
+
+static int qaic_ssr_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ssr_resp *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp) + SSR_RESP_MSG_SZ, GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ resp->qdev = qdev;
+ INIT_WORK(&resp->work, ssr_worker);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ssr_ch = mhi_dev;
+
+ return 0;
+}
+
+static void qaic_ssr_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ mhi_unprepare_from_transfer(qdev->ssr_ch);
+ qdev->ssr_ch = NULL;
+}
+
+static void qaic_ssr_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ struct _ssr_hdr *hdr = mhi_result->buf_addr;
+ struct ssr_dump_info *dump_info;
+
+ if (mhi_result->transaction_status) {
+ kfree(mhi_result->buf_addr);
+ return;
+ }
+
+ /*
+ * MEMORY READ is used to download the crashdump, which is transferred
+ * chunk by chunk in a series of MEMORY READ SSR commands. To avoid
+ * repeated kmalloc() and kfree() of the same MEMORY READ request
+ * buffer, we allocate only one such buffer and free it only once.
+ */
+ if (le32_to_cpu(hdr->cmd) == MEMORY_READ) {
+ dump_info = ssr_crash->dump_info;
+ if (dump_info) {
+ dump_info->read_buf_req_queued = false;
+ return;
+ }
+ }
+
+ kfree(mhi_result->buf_addr);
+}
+
+static void qaic_ssr_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct ssr_resp *resp = container_of(mhi_result->buf_addr, struct ssr_resp, data);
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ bool memory_read_rsp = false;
+
+ if (ssr_crash && ssr_crash->data == mhi_result->buf_addr)
+ memory_read_rsp = true;
+
+ if (mhi_result->transaction_status) {
+ /* Do not free the SSR crashdump buffer as it is allocated via managed APIs */
+ if (!memory_read_rsp)
+ kfree(resp);
+ return;
+ }
+
+ if (memory_read_rsp)
+ queue_work(qdev->ssr_wq, &ssr_crash->work);
+ else
+ queue_work(qdev->ssr_wq, &resp->work);
+}
+
+static const struct mhi_device_id qaic_ssr_mhi_match_table[] = {
+ { .chan = "QAIC_SSR", },
+ {},
+};
+
+static struct mhi_driver qaic_ssr_mhi_driver = {
+ .id_table = qaic_ssr_mhi_match_table,
+ .remove = qaic_ssr_mhi_remove,
+ .probe = qaic_ssr_mhi_probe,
+ .ul_xfer_cb = qaic_ssr_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ssr_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ssr",
+ },
+};
+
+int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm)
+{
+ struct ssr_crashdump *ssr_crash;
+
+ qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
+
+ /*
+ * The device requests only one SSR at a time, so allocating a single
+ * buffer for downloading the crashdump is sufficient.
+ */
+ ssr_crash = drmm_kzalloc(drm, SSR_MHI_BUF_SIZE, GFP_KERNEL);
+ if (!ssr_crash)
+ return -ENOMEM;
+
+ ssr_crash->qdev = qdev;
+ INIT_WORK(&ssr_crash->work, ssr_dump_worker);
+ qdev->ssr_mhi_buf = ssr_crash;
+
+ return 0;
+}
+
+int qaic_ssr_register(void)
+{
+ return mhi_driver_register(&qaic_ssr_mhi_driver);
+}
+
+void qaic_ssr_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ssr_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ssr.h b/drivers/accel/qaic/qaic_ssr.h
new file mode 100644
index 000000000000..97ccff305750
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ssr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QAIC_SSR_H__
+#define __QAIC_SSR_H__
+
+struct drm_device;
+struct qaic_device;
+
+int qaic_ssr_register(void);
+void qaic_ssr_unregister(void);
+void qaic_clean_up_ssr(struct qaic_device *qdev);
+int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm);
+#endif /* __QAIC_SSR_H__ */
diff --git a/drivers/accel/qaic/qaic_sysfs.c b/drivers/accel/qaic/qaic_sysfs.c
new file mode 100644
index 000000000000..e0afb0ffb589
--- /dev/null
+++ b/drivers/accel/qaic/qaic_sysfs.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2025, The Linux Foundation. All rights reserved. */
+
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+#include "qaic.h"
+
+#define NAME_LEN 14
+
+struct dbc_attribute {
+ struct device_attribute dev_attr;
+ u32 dbc_id;
+ char name[NAME_LEN];
+};
+
+static ssize_t dbc_state_show(struct device *dev, struct device_attribute *a, char *buf)
+{
+ struct dbc_attribute *dbc_attr = container_of(a, struct dbc_attribute, dev_attr);
+ struct drm_minor *minor = dev_get_drvdata(dev);
+ struct qaic_device *qdev;
+
+ qdev = to_qaic_device(minor->dev);
+ return sysfs_emit(buf, "%d\n", qdev->dbc[dbc_attr->dbc_id].state);
+}
+
+void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state)
+{
+ struct device *kdev = to_accel_kdev(qdev->qddev);
+ char *envp[3] = {};
+ char state_str[16];
+ char id_str[12];
+
+ envp[0] = id_str;
+ envp[1] = state_str;
+
+ if (state >= DBC_STATE_MAX)
+ return;
+ if (dbc_id >= qdev->num_dbc)
+ return;
+ if (state == qdev->dbc[dbc_id].state)
+ return;
+
+ scnprintf(id_str, ARRAY_SIZE(id_str), "DBC_ID=%d", dbc_id);
+ scnprintf(state_str, ARRAY_SIZE(state_str), "DBC_STATE=%d", state);
+
+ qdev->dbc[dbc_id].state = state;
+ kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp);
+}
+
+int qaic_sysfs_init(struct qaic_drm_device *qddev)
+{
+ struct device *kdev = to_accel_kdev(qddev);
+ struct drm_device *drm = to_drm(qddev);
+ u32 num_dbc = qddev->qdev->num_dbc;
+ struct dbc_attribute *dbc_attrs;
+ int i, ret;
+
+ dbc_attrs = drmm_kcalloc(drm, num_dbc, sizeof(*dbc_attrs), GFP_KERNEL);
+ if (!dbc_attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dbc; ++i) {
+ struct dbc_attribute *dbc_attr = &dbc_attrs[i];
+
+ sysfs_attr_init(&dbc_attr->dev_attr.attr);
+ dbc_attr->dbc_id = i;
+ scnprintf(dbc_attr->name, NAME_LEN, "dbc%d_state", i);
+ dbc_attr->dev_attr.attr.name = dbc_attr->name;
+ dbc_attr->dev_attr.attr.mode = 0444;
+ dbc_attr->dev_attr.show = dbc_state_show;
+ ret = sysfs_create_file(&kdev->kobj, &dbc_attr->dev_attr.attr);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; ++j) {
+ dbc_attr = &dbc_attrs[j];
+ sysfs_remove_file(&kdev->kobj, &dbc_attr->dev_attr.attr);
+ }
+ drmm_kfree(drm, dbc_attrs);
+ return ret;
+ }
+ }
+
+ qddev->sysfs_attrs = dbc_attrs;
+ return 0;
+}
+
+void qaic_sysfs_remove(struct qaic_drm_device *qddev)
+{
+ struct dbc_attribute *dbc_attrs = qddev->sysfs_attrs;
+ struct device *kdev = to_accel_kdev(qddev);
+ u32 num_dbc = qddev->qdev->num_dbc;
+ int i;
+
+ if (!dbc_attrs)
+ return;
+
+ qddev->sysfs_attrs = NULL;
+ for (i = 0; i < num_dbc; ++i)
+ sysfs_remove_file(&kdev->kobj, &dbc_attrs[i].dev_attr.attr);
+ drmm_kfree(to_drm(qddev), dbc_attrs);
+}
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 301f4462d51b..8af2475f4f36 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -129,7 +129,7 @@ static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_resu
static void qaic_timesync_timer(struct timer_list *t)
{
- struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
+ struct mqts_dev *mqtsdev = timer_container_of(mqtsdev, t, timer);
struct qts_host_time_sync_msg_data *sync_msg;
u64 device_qtimer_us;
u64 device_qtimer;
@@ -171,6 +171,13 @@ mod_timer:
dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
}
+void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ timer_delete_sync(&mqtsdev->timer);
+}
+
static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
@@ -201,11 +208,12 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi
goto free_sync_msg;
/* Qtimer register pointer */
- mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ mqtsdev->qtimer_addr = qdev->bar_mhi + QTIMER_REG_OFFSET;
timer_setup(timer, qaic_timesync_timer, 0);
timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
add_timer(timer);
dev_set_drvdata(&mhi_dev->dev, mqtsdev);
+ qdev->mqts_ch = mhi_dev;
return 0;
@@ -221,7 +229,8 @@ static void qaic_timesync_remove(struct mhi_device *mhi_dev)
{
struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
- del_timer_sync(&mqtsdev->timer);
+ mqtsdev->qdev->mqts_ch = NULL;
+ timer_delete_sync(&mqtsdev->timer);
mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
kfree(mqtsdev->sync_msg);
kfree(mqtsdev);
diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h
index 851b7acd43bb..77b9c2b55057 100644
--- a/drivers/accel/qaic/qaic_timesync.h
+++ b/drivers/accel/qaic/qaic_timesync.h
@@ -6,6 +6,9 @@
#ifndef __QAIC_TIMESYNC_H__
#define __QAIC_TIMESYNC_H__
+#include <linux/mhi.h>
+
int qaic_timesync_init(void);
void qaic_timesync_deinit(void);
+void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev);
#endif /* __QAIC_TIMESYNC_H__ */
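
Usage note: qaic_timesync_probe() now caches the MHI device in qdev->mqts_ch,
so other parts of the driver can quiesce the periodic host-to-device sync
without tearing the channel down. A hedged sketch of such a caller (the
function name and surrounding reset path are illustrative; only qdev->mqts_ch
and qaic_mqts_ch_stop_timer() come from this patch):

	/* Sketch: stop the periodic time sync while resetting the device. */
	static void qaic_dev_reset_prepare(struct qaic_device *qdev)
	{
		if (qdev->mqts_ch)
			qaic_mqts_ch_stop_timer(qdev->mqts_ch);
	}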
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index bf94bbab6be5..fd3c3b2d1fd3 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
@@ -9,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include "sahara.h"
@@ -36,12 +38,14 @@
#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE 0x80000
+#define SAHARA_READ_MAX_SIZE 0xfff0U /* Avoid unaligned requests */
#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE U32_MAX
#define SAHARA_VERSION 2
#define SAHARA_SUCCESS 0
+#define SAHARA_TABLE_ENTRY_STR_LEN 20
#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
@@ -53,6 +57,8 @@
#define SAHARA_END_OF_IMAGE_LENGTH 0x10
#define SAHARA_DONE_LENGTH 0x8
#define SAHARA_RESET_LENGTH 0x8
+#define SAHARA_MEM_DEBUG64_LENGTH 0x18
+#define SAHARA_MEM_READ64_LENGTH 0x18
struct sahara_packet {
__le32 cmd;
@@ -80,21 +86,102 @@ struct sahara_packet {
__le32 image;
__le32 status;
} end_of_image;
+ struct {
+ __le64 table_address;
+ __le64 table_length;
+ } memory_debug64;
+ struct {
+ __le64 memory_address;
+ __le64 memory_length;
+ } memory_read64;
};
};
+struct sahara_debug_table_entry64 {
+ __le64 type;
+ __le64 address;
+ __le64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+struct sahara_dump_table_entry {
+ u64 type;
+ u64 address;
+ u64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
+#define SAHARA_DUMP_V1_VER 1
+struct sahara_memory_dump_meta_v1 {
+ u64 magic;
+ u64 version;
+ u64 dump_size;
+ u64 table_size;
+};
+
+/*
+ * Layout of crashdump provided to user via devcoredump
+ * +------------------------------------------+
+ * | Crashdump Meta structure |
+ * | type: struct sahara_memory_dump_meta_v1 |
+ * +------------------------------------------+
+ * | Crashdump Table |
+ * | type: array of struct |
+ * | sahara_dump_table_entry |
+ * | |
+ * | |
+ * +------------------------------------------+
+ * | Crashdump |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ *
+ * First is the metadata header. Userspace can use the magic number to verify
+ * the content type, and then check the version for the rest of the format.
+ * New versions should keep the magic number location/value, and version
+ * location, but increment the version value.
+ *
+ * For v1, the metadata lists the size of the entire dump (header + table +
+ * dump) and the size of the table. Then comes the dump image table, which
+ * describes the contents of the dump. Finally, all the images are listed in
+ * order, with no dead space in between. Userspace can use the sizes listed
+ * in the image table to reconstruct the individual images.
+ */
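
Illustration: a userspace consumer might validate and walk a v1 dump read out
of devcoredump roughly as below. This is a hedged sketch, not part of the
patch; the struct definitions simply mirror the kernel ones above (both table
entry layouts are 64 bytes), and all multi-byte fields are written in host
endianness by the driver.

	#include <stdint.h>
	#include <stdio.h>

	struct dump_meta_v1 {		/* mirrors sahara_memory_dump_meta_v1 */
		uint64_t magic;
		uint64_t version;
		uint64_t dump_size;
		uint64_t table_size;
	};

	struct dump_entry {		/* mirrors sahara_dump_table_entry */
		uint64_t type;
		uint64_t address;
		uint64_t length;
		char description[20];
		char filename[20];
	};

	static int walk_dump(const void *buf, size_t len)
	{
		const struct dump_meta_v1 *meta = buf;
		const struct dump_entry *table;
		uint64_t nents, off, i;

		if (len < sizeof(*meta) || meta->magic != 0x1234567890abcdefULL ||
		    meta->version != 1 || meta->dump_size > len)
			return -1;

		table = (const struct dump_entry *)(meta + 1);
		nents = meta->table_size / sizeof(*table);

		/* Images follow the table back to back, in table order. */
		off = sizeof(*meta) + nents * sizeof(*table);
		for (i = 0; i < nents; i++) {
			printf("%s: %#llx bytes at dump offset %#llx\n",
			       table[i].filename,
			       (unsigned long long)table[i].length,
			       (unsigned long long)off);
			off += table[i].length;
		}
		return 0;
	}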
+
struct sahara_context {
struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
struct sahara_packet *rx;
- struct work_struct work;
+ struct work_struct fw_work;
+ struct work_struct dump_work;
+ struct work_struct read_data_work;
struct mhi_device *mhi_dev;
- const char **image_table;
+ const char * const *image_table;
u32 table_size;
u32 active_image_id;
const struct firmware *firmware;
+ u64 dump_table_address;
+ u64 dump_table_length;
+ size_t rx_size;
+ size_t rx_size_requested;
+ void *mem_dump;
+ size_t mem_dump_sz;
+ struct sahara_dump_table_entry *dump_image;
+ u64 dump_image_offset;
+ void *mem_dump_freespace;
+ u64 dump_images_left;
+ u32 read_data_offset;
+ u32 read_data_length;
+ bool is_mem_dump_mode;
+ bool non_streaming;
};
-static const char *aic100_image_table[] = {
+static const char * const aic100_image_table[] = {
[1] = "qcom/aic100/fw1.bin",
[2] = "qcom/aic100/fw2.bin",
[4] = "qcom/aic100/fw4.bin",
@@ -105,6 +192,42 @@ static const char *aic100_image_table[] = {
[10] = "qcom/aic100/fw10.bin",
};
+static const char * const aic200_image_table[] = {
+ [5] = "qcom/aic200/uefi.elf",
+ [12] = "qcom/aic200/aic200-nsp.bin",
+ [23] = "qcom/aic200/aop.mbn",
+ [32] = "qcom/aic200/tz.mbn",
+ [33] = "qcom/aic200/hypvm.mbn",
+ [38] = "qcom/aic200/xbl_config.elf",
+ [39] = "qcom/aic200/aic200_abl.elf",
+ [40] = "qcom/aic200/apdp.mbn",
+ [41] = "qcom/aic200/devcfg.mbn",
+ [42] = "qcom/aic200/sec.elf",
+ [43] = "qcom/aic200/aic200-hlos.elf",
+ [49] = "qcom/aic200/shrm.elf",
+ [50] = "qcom/aic200/cpucp.elf",
+ [51] = "qcom/aic200/aop_devcfg.mbn",
+ [54] = "qcom/aic200/qupv3fw.elf",
+ [57] = "qcom/aic200/cpucp_dtbs.elf",
+ [62] = "qcom/aic200/uefi_dtbs.elf",
+ [63] = "qcom/aic200/xbl_ac_config.mbn",
+ [64] = "qcom/aic200/tz_ac_config.mbn",
+ [65] = "qcom/aic200/hyp_ac_config.mbn",
+ [66] = "qcom/aic200/pdp.elf",
+ [67] = "qcom/aic200/pdp_cdb.elf",
+ [68] = "qcom/aic200/sdi.mbn",
+ [69] = "qcom/aic200/dcd.mbn",
+ [73] = "qcom/aic200/gearvm.mbn",
+ [74] = "qcom/aic200/sti.bin",
+ [76] = "qcom/aic200/tz_qti_config.mbn",
+ [78] = "qcom/aic200/pvs.bin",
+};
+
+static bool is_streaming(struct sahara_context *context)
+{
+ return !context->non_streaming;
+}
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -153,6 +276,10 @@ static void sahara_send_reset(struct sahara_context *context)
{
int ret;
+ context->is_mem_dump_mode = false;
+ context->read_data_offset = 0;
+ context->read_data_length = 0;
+
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@@ -186,7 +313,8 @@ static void sahara_hello(struct sahara_context *context)
}
if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
- le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
le32_to_cpu(context->rx->hello.mode));
return;
@@ -205,9 +333,39 @@ static void sahara_hello(struct sahara_context *context)
dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}
+static int read_data_helper(struct sahara_context *context, int buf_index)
+{
+ enum mhi_flags mhi_flag;
+ u32 pkt_data_len;
+ int ret;
+
+ pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[buf_index],
+ &context->firmware->data[context->read_data_offset],
+ pkt_data_len);
+
+ context->read_data_offset += pkt_data_len;
+ context->read_data_length -= pkt_data_len;
+
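+ /*
+ * SBL (non-streaming) firmware consumes one chained transfer, so only
+ * the last buffer of the request gets EOT; XBL firmware streams each
+ * buffer as its own transfer, so every buffer gets EOT.
+ */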
+ if (is_streaming(context) || !context->read_data_length)
+ mhi_flag = MHI_EOT;
+ else
+ mhi_flag = MHI_CHAIN;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[buf_index], pkt_data_len, mhi_flag);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void sahara_read_data(struct sahara_context *context)
{
- u32 image_id, data_offset, data_len, pkt_data_len;
+ u32 image_id, data_offset, data_len;
int ret;
int i;
@@ -243,7 +401,7 @@ static void sahara_read_data(struct sahara_context *context)
* and is not needed here on error.
*/
- if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) {
dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
data_len, SAHARA_TRANSFER_MAX_SIZE);
sahara_send_reset(context);
@@ -264,22 +422,18 @@ static void sahara_read_data(struct sahara_context *context)
return;
}
- for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
- pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
-
- memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+ context->read_data_offset = data_offset;
+ context->read_data_length = data_len;
- data_offset += pkt_data_len;
- data_len -= pkt_data_len;
+ if (is_streaming(context)) {
+ schedule_work(&context->read_data_work);
+ return;
+ }
- ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
- context->tx[i], pkt_data_len,
- !data_len ? MHI_EOT : MHI_CHAIN);
- if (ret) {
- dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
- ret);
- return;
- }
+ for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) {
+ ret = read_data_helper(context, i);
+ if (ret)
+ break;
}
}
@@ -320,9 +474,70 @@ static void sahara_end_of_image(struct sahara_context *context)
dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}
+static void sahara_memory_debug64(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
+ le32_to_cpu(context->rx->length),
+ le64_to_cpu(context->rx->memory_debug64.table_address),
+ le64_to_cpu(context->rx->memory_debug64.table_length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
+ context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);
+
+ if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
+ !context->dump_table_length) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
+ context->dump_table_length);
+ return;
+ }
+
+ /*
+ * From this point, the protocol flips. We make memory_read requests to
+ * the device, and the device responds with the raw data. If the device
+ * encounters an error, it will send an End of Image command. First we need
+ * to request the memory dump table so that we know where all the pieces of
+ * the dump that we need to consume are located.
+ */
+
+ context->is_mem_dump_mode = true;
+
+ /*
+ * Assume that the table is smaller than our MTU so that we can read it
+ * in one shot. The spec does not put an upper limit on the table, but
+ * no known device will exceed this.
+ */
+ if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
+ context->dump_table_length);
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);
+
+ context->rx_size_requested = context->dump_table_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
+}
+
static void sahara_processing(struct work_struct *work)
{
- struct sahara_context *context = container_of(work, struct sahara_context, work);
+ struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
int ret;
switch (le32_to_cpu(context->rx->cmd)) {
@@ -338,6 +553,12 @@ static void sahara_processing(struct work_struct *work)
case SAHARA_DONE_RESP_CMD:
/* Intentional do nothing as we don't need to exit an app */
break;
+ case SAHARA_RESET_RESP_CMD:
+ /* Intentional do nothing as we requested the reset ourselves */
+ break;
+ case SAHARA_MEM_DEBUG64_CMD:
+ sahara_memory_debug64(context);
+ break;
default:
dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
le32_to_cpu(context->rx->cmd));
@@ -350,6 +571,229 @@ static void sahara_processing(struct work_struct *work)
dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}
+static void sahara_parse_dump_table(struct sahara_context *context)
+{
+ struct sahara_dump_table_entry *image_out_table;
+ struct sahara_debug_table_entry64 *dev_table;
+ struct sahara_memory_dump_meta_v1 *dump_meta;
+ u64 table_nents;
+ u64 dump_length;
+ u64 mul_bytes;
+ int ret;
+ u64 i;
+
+ table_nents = context->dump_table_length / sizeof(*dev_table);
+ context->dump_images_left = table_nents;
+ dump_length = 0;
+
+ dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
+ for (i = 0; i < table_nents; ++i) {
+ /* Do not trust the device, ensure the strings are terminated */
+ dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+ dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+
+ if (check_add_overflow(dump_length,
+ le64_to_cpu(dev_table[i].length),
+ &dump_length)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ dev_dbg(&context->mhi_dev->dev,
+ "Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
+ i,
+ le64_to_cpu(dev_table[i].type),
+ le64_to_cpu(dev_table[i].address),
+ le64_to_cpu(dev_table[i].length),
+ dev_table[i].description,
+ dev_table[i].filename);
+ }
+
+ if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ if (check_add_overflow(dump_length, mul_bytes, &dump_length)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->mem_dump_sz = dump_length;
+ context->mem_dump = vzalloc(dump_length);
+ if (!context->mem_dump) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ /* Populate the dump metadata and table for userspace */
+ dump_meta = context->mem_dump;
+ dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
+ dump_meta->version = SAHARA_DUMP_V1_VER;
+ dump_meta->dump_size = dump_length;
+ dump_meta->table_size = context->dump_table_length;
+
+ image_out_table = context->mem_dump + sizeof(*dump_meta);
+ for (i = 0; i < table_nents; ++i) {
+ image_out_table[i].type = le64_to_cpu(dev_table[i].type);
+ image_out_table[i].address = le64_to_cpu(dev_table[i].address);
+ image_out_table[i].length = le64_to_cpu(dev_table[i].length);
+ strscpy(image_out_table[i].description, dev_table[i].description,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ strscpy(image_out_table[i].filename,
+ dev_table[i].filename,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ }
+
+ context->mem_dump_freespace = &image_out_table[i];
+
+ /* Done parsing the table, switch to image dump mode */
+ context->dump_table_length = 0;
+
+ /* Request the first chunk of the first image */
+ context->dump_image = &image_out_table[0];
+ dump_length = min_t(u64, context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->dump_image_offset = dump_length;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_parse_dump_image(struct sahara_context *context)
+{
+ u64 dump_length;
+ int ret;
+
+ memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
+ context->mem_dump_freespace += context->rx_size;
+
+ if (context->dump_image_offset >= context->dump_image->length) {
+ /* Need to move to next image */
+ context->dump_image++;
+ context->dump_images_left--;
+ context->dump_image_offset = 0;
+
+ if (!context->dump_images_left) {
+ /* Dump done */
+ dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
+ context->mem_dump,
+ context->mem_dump_sz,
+ GFP_KERNEL);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+ return;
+ }
+ }
+
+ /* Get next image chunk */
+ dump_length = context->dump_image->length - context->dump_image_offset;
+ dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address =
+ cpu_to_le64(context->dump_image->address + context->dump_image_offset);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->dump_image_offset += dump_length;
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev,
+ "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_dump_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
+ int ret;
+
+ /*
+ * We should get the expected raw data, but if the device has an error
+ * it is supposed to send EOI with an error code.
+ */
+ if (context->rx_size != context->rx_size_requested &&
+ context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
+ context->rx_size_requested,
+ context->rx_size);
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected EOI response to read_data. Status: %d\n",
+ le32_to_cpu(context->rx->end_of_image.status));
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Invalid EOI response to read_data. CMD: %d\n",
+ le32_to_cpu(context->rx->cmd));
+ goto error;
+ }
+
+ /*
+ * Need to know if we received the dump table, or part of a dump image.
+ * Since we get raw data, we cannot tell from the data itself. Instead,
+ * we use the stored dump_table_length, which we zero after we read and
+ * process the entire table.
+ */
+ if (context->dump_table_length)
+ sahara_parse_dump_table(context);
+ else
+ sahara_parse_dump_image(context);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+
+ return;
+
+error:
+ vfree(context->mem_dump);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+}
+
+static void sahara_read_data_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, read_data_work);
+
+ read_data_helper(context, 0);
+}
+
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@@ -364,27 +808,57 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
if (!context->rx)
return -ENOMEM;
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->non_streaming = true;
+ }
+
/*
- * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
- * will request for READ_DATA. This is larger than
- * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
- * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
- * READ_DATA, it requires a transfer of the exact size requested. We
- * can use MHI_CHAIN to link multiple buffers into a single transfer
- * but the remote side will not consume the buffers until it sees an
- * EOT, thus we need to allocate enough buffers to put in the tx fifo
- * to cover an entire READ_DATA request of the max size.
+ * There are two firmware implementations for READ_DATA handling.
+ * The older "SBL" implementation defines a Sahara transfer size, and
+ * expects that the response is a single transport transfer. If the
+ * FW wants to transfer a file that is larger than the transfer size,
+ * the FW will issue multiple READ_DATA commands. For this
+ * implementation, we need to allocate enough buffers to contain the
+ * entire Sahara transfer size.
+ *
+ * The newer "XBL" implementation does not define a maximum transfer
+ * size and instead expects the data to be streamed over using the
+ * transport level MTU. The FW will issue a single READ_DATA command
+ * of whatever size, and consume multiple transport level transfers
+ * until the expected amount of data is consumed. For this
+ * implementation we only need a single buffer of the transport MTU
+ * but we'll need to be able to use it multiple times for a single
+ * READ_DATA request.
+ *
+ * AIC100 is the SBL implementation and defines SAHARA_TRANSFER_MAX_SIZE
+ * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. We can use
+ * MHI_CHAIN to link multiple buffers into a single transfer but the
+ * remote side will not consume the buffers until it sees an EOT, thus
+ * we need to allocate enough buffers to put in the tx fifo to cover an
+ * entire READ_DATA request of the max size.
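+ *
+ * (Concretely, SAHARA_NUM_TX_BUF = DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,
+ * SAHARA_PACKET_MAX_SIZE) = DIV_ROUND_UP(0x80000, 0xffff) = 9 buffers.)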
+ *
+ * AIC200 is the XBL implementation, and so a single buffer will work.
*/
for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
- context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev,
+ SAHARA_PACKET_MAX_SIZE,
+ GFP_KERNEL);
if (!context->tx[i])
return -ENOMEM;
+ if (is_streaming(context))
+ break;
}
context->mhi_dev = mhi_dev;
- INIT_WORK(&context->work, sahara_processing);
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
+ INIT_WORK(&context->fw_work, sahara_processing);
+ INIT_WORK(&context->dump_work, sahara_dump_processing);
+ INIT_WORK(&context->read_data_work, sahara_read_data_processing);
+
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
@@ -405,21 +879,33 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
- cancel_work_sync(&context->work);
+ cancel_work_sync(&context->fw_work);
+ cancel_work_sync(&context->dump_work);
+ vfree(context->mem_dump);
sahara_release_image(context);
mhi_unprepare_from_transfer(mhi_dev);
}
static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context))
+ schedule_work(&context->read_data_work);
}
static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
- if (!mhi_result->transaction_status)
- schedule_work(&context->work);
+ if (!mhi_result->transaction_status) {
+ context->rx_size = mhi_result->bytes_xferd;
+ if (context->is_mem_dump_mode)
+ schedule_work(&context->dump_work);
+ else
+ schedule_work(&context->fw_work);
+ }
}
static const struct mhi_device_id sahara_mhi_match_table[] = {
diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig
new file mode 100644
index 000000000000..16465abe0660
--- /dev/null
+++ b/drivers/accel/rocket/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ROCKET
+ tristate "Rocket (support for Rockchip NPUs)"
+ depends on DRM_ACCEL
+ depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST
+ depends on ROCKCHIP_IOMMU || COMPILE_TEST
+ depends on MMU
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ help
+ Choose this option if you have a Rockchip SoC that contains a
+ compatible Neural Processing Unit (NPU), such as the RK3588. Known to
+ Rockchip as either RKNN or RKNPU, it accelerates inference of neural
+ networks.
+
+ The interface exposed to userspace is described in
+ include/uapi/drm/rocket_accel.h and is used by the Rocket userspace
+ driver in Mesa3D.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile
new file mode 100644
index 000000000000..3713dfe223d6
--- /dev/null
+++ b/drivers/accel/rocket/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o
+
+rocket-y := \
+ rocket_core.o \
+ rocket_device.o \
+ rocket_drv.o \
+ rocket_gem.o \
+ rocket_job.o
diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
new file mode 100644
index 000000000000..abe7719c1db4
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "rocket_core.h"
+#include "rocket_job.h"
+
+int rocket_core_init(struct rocket_core *core)
+{
+ struct device *dev = core->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 version;
+ int err = 0;
+
+ core->resets[0].id = "srst_a";
+ core->resets[1].id = "srst_h";
+ err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets),
+ core->resets);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index);
+
+ err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index);
+
+ core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc");
+ if (IS_ERR(core->pc_iomem)) {
+ dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem));
+ return PTR_ERR(core->pc_iomem);
+ }
+
+ core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna");
+ if (IS_ERR(core->cna_iomem)) {
+ dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem));
+ return PTR_ERR(core->cna_iomem);
+ }
+
+ core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(core->core_iomem)) {
+ dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem));
+ return PTR_ERR(core->core_iomem);
+ }
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return err;
+
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+ if (err)
+ return err;
+
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+ * As this NPU will most often be used as part of a media pipeline that
+ * ends up presenting on a display, choose an autosuspend delay of 50 ms
+ * (~3 frames at 60 Hz), which keeps the device powered up while the
+ * pipeline is running.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 50);
+
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ rocket_job_fini(core);
+ return err;
+ }
+
+ version = rocket_pc_readl(core, VERSION);
+ version += rocket_pc_readl(core, VERSION_NUM) & 0xffff;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version);
+
+ return 0;
+}
+
+void rocket_core_fini(struct rocket_core *core)
+{
+ pm_runtime_dont_use_autosuspend(core->dev);
+ pm_runtime_disable(core->dev);
+ iommu_group_put(core->iommu_group);
+ core->iommu_group = NULL;
+ rocket_job_fini(core);
+}
+
+void rocket_core_reset(struct rocket_core *core)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets);
+
+ udelay(10);
+
+ reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets);
+}
diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h
new file mode 100644
index 000000000000..f6d7382854ca
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_CORE_H__
+#define __ROCKET_CORE_H__
+
+#include <drm/gpu_scheduler.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex_types.h>
+#include <linux/reset.h>
+
+#include "rocket_registers.h"
+
+#define rocket_pc_readl(core, reg) \
+ readl((core)->pc_iomem + (REG_PC_##reg))
+#define rocket_pc_writel(core, reg, value) \
+ writel(value, (core)->pc_iomem + (REG_PC_##reg))
+
+#define rocket_cna_readl(core, reg) \
+ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+#define rocket_cna_writel(core, reg, value) \
+ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+
+#define rocket_core_readl(core, reg) \
+ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+#define rocket_core_writel(core, reg, value) \
+ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
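+
+/*
+ * Note: the "cna" and "core" MMIO resources presumably start at their
+ * respective S_STATUS registers, so the absolute offsets generated in
+ * rocket_registers.h are rebased by subtracting REG_CNA_S_STATUS /
+ * REG_CORE_S_STATUS. For example, rocket_cna_writel(core, S_POINTER, v)
+ * expands to writel(v, (core)->cna_iomem + REG_CNA_S_POINTER - REG_CNA_S_STATUS).
+ */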
+
+struct rocket_core {
+ struct device *dev;
+ struct rocket_device *rdev;
+ unsigned int index;
+
+ int irq;
+ void __iomem *pc_iomem;
+ void __iomem *cna_iomem;
+ void __iomem *core_iomem;
+ struct clk_bulk_data clks[4];
+ struct reset_control_bulk_data resets[2];
+
+ struct iommu_group *iommu_group;
+
+ struct mutex job_lock;
+ struct rocket_job *in_flight_job;
+
+ spinlock_t fence_lock;
+
+ struct {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ atomic_t pending;
+ } reset;
+
+ struct drm_gpu_scheduler sched;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+int rocket_core_init(struct rocket_core *core);
+void rocket_core_fini(struct rocket_core *core);
+void rocket_core_reset(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c
new file mode 100644
index 000000000000..46e6ee1e72c5
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_drv.h>
+#include <linux/array_size.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "rocket_device.h"
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *core_node;
+ struct rocket_device *rdev;
+ struct drm_device *ddev;
+ unsigned int num_cores = 0;
+ int err;
+
+ rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev);
+ if (IS_ERR(rdev))
+ return rdev;
+
+ ddev = &rdev->ddev;
+ dev_set_drvdata(dev, rdev);
+
+ for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core")
+ if (of_device_is_available(core_node))
+ num_cores++;
+
+ rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);
+ if (!rdev->cores)
+ return ERR_PTR(-ENOMEM);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return ERR_PTR(err);
+
+ err = devm_mutex_init(dev, &rdev->sched_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ err = drm_dev_register(ddev, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ return rdev;
+}
+
+void rocket_device_fini(struct rocket_device *rdev)
+{
+ WARN_ON(rdev->num_cores > 0);
+
+ drm_dev_unregister(&rdev->ddev);
+}
diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h
new file mode 100644
index 000000000000..ce662abc01d3
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DEVICE_H__
+#define __ROCKET_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "rocket_core.h"
+
+struct rocket_device {
+ struct drm_device ddev;
+
+ struct mutex sched_lock;
+
+ struct rocket_core *cores;
+ unsigned int num_cores;
+};
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver);
+void rocket_device_fini(struct rocket_device *rdev);
+#define to_rocket_device(drm_dev) \
+ ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev)))
+
+#endif /* __ROCKET_DEVICE_H__ */
diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
new file mode 100644
index 000000000000..5c0b63f0a8f0
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/rocket_accel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+#include "rocket_job.h"
+
+/*
+ * Facade device: exposes a single DRM device to userspace and schedules
+ * jobs to any of the RKNN cores in the system.
+ */
+static struct platform_device *drm_dev;
+static struct rocket_device *rdev;
+
+static void
+rocket_iommu_domain_destroy(struct kref *kref)
+{
+ struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref);
+
+ iommu_domain_free(domain->domain);
+ domain->domain = NULL;
+ kfree(domain);
+}
+
+static struct rocket_iommu_domain *
+rocket_iommu_domain_create(struct device *dev)
+{
+ struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ void *err;
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain->domain)) {
+ err = ERR_CAST(domain->domain);
+ kfree(domain);
+ return err;
+ }
+ kref_init(&domain->kref);
+
+ return domain;
+}
+
+struct rocket_iommu_domain *
+rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv)
+{
+ kref_get(&rocket_priv->domain->kref);
+ return rocket_priv->domain;
+}
+
+void
+rocket_iommu_domain_put(struct rocket_iommu_domain *domain)
+{
+ kref_put(&domain->kref, rocket_iommu_domain_destroy);
+}
+
+static int
+rocket_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *rocket_priv;
+ u64 start, end;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL);
+ if (!rocket_priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ rocket_priv->rdev = rdev;
+ rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);
+ if (IS_ERR(rocket_priv->domain)) {
+ ret = PTR_ERR(rocket_priv->domain);
+ goto err_free;
+ }
+
+ file->driver_priv = rocket_priv;
+
+ start = rocket_priv->domain->domain->geometry.aperture_start;
+ end = rocket_priv->domain->domain->geometry.aperture_end;
+ drm_mm_init(&rocket_priv->mm, start, end - start + 1);
+ mutex_init(&rocket_priv->mm_lock);
+
+ ret = rocket_job_open(rocket_priv);
+ if (ret)
+ goto err_mm_takedown;
+
+ return 0;
+
+err_mm_takedown:
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+err_free:
+ kfree(rocket_priv);
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+rocket_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+
+ rocket_job_close(rocket_priv);
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+ kfree(rocket_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = {
+#define ROCKET_IOCTL(n, func) \
+ DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0)
+
+ ROCKET_IOCTL(CREATE_BO, create_bo),
+ ROCKET_IOCTL(SUBMIT, submit),
+ ROCKET_IOCTL(PREP_BO, prep_bo),
+ ROCKET_IOCTL(FINI_BO, fini_bo),
+};
+
+DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops);
+
+/*
+ * Rocket driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver rocket_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = rocket_open,
+ .postclose = rocket_postclose,
+ .gem_create_object = rocket_gem_create_object,
+ .ioctls = rocket_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls),
+ .fops = &rocket_accel_driver_fops,
+ .name = "rocket",
+ .desc = "rocket DRM",
+};
+
+static int rocket_probe(struct platform_device *pdev)
+{
+ unsigned int core;
+
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to initialize rocket device\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ core = rdev->num_cores;
+
+ dev_set_drvdata(&pdev->dev, rdev);
+
+ rdev->cores[core].rdev = rdev;
+ rdev->cores[core].dev = &pdev->dev;
+ rdev->cores[core].index = core;
+
+ rdev->num_cores++;
+
+ return rocket_core_init(&rdev->cores[core]);
+}
+
+static void rocket_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (rdev->cores[core].dev == dev) {
+ rocket_core_fini(&rdev->cores[core]);
+ rdev->num_cores--;
+ break;
+ }
+ }
+
+ if (rdev->num_cores == 0) {
+ /* Last core removed, deinitialize DRM device. */
+ rocket_device_fini(rdev);
+ rdev = NULL;
+ }
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-rknn-core" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int find_core_for_dev(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (dev == rdev->cores[core].dev)
+ return core;
+ }
+
+ return -1;
+}
+
+static int rocket_device_runtime_resume(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+ int err = 0;
+
+ if (core < 0)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+ if (err) {
+ dev_err(dev, "failed to enable (%d) clocks for core %d\n", err, core);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rocket_device_runtime_suspend(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+
+ if (core < 0)
+ return -ENODEV;
+
+ if (!rocket_job_is_idle(&rdev->cores[core]))
+ return -EBUSY;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = {
+ RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+static struct platform_driver rocket_driver = {
+ .probe = rocket_probe,
+ .remove = rocket_remove,
+ .driver = {
+ .name = "rocket",
+ .pm = pm_ptr(&rocket_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init rocket_register(void)
+{
+ drm_dev = platform_device_register_simple("rknn", -1, NULL, 0);
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
+
+ return platform_driver_register(&rocket_driver);
+}
+
+static void __exit rocket_unregister(void)
+{
+ platform_driver_unregister(&rocket_driver);
+
+ platform_device_unregister(drm_dev);
+}
+
+module_init(rocket_register);
+module_exit(rocket_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP");
+MODULE_AUTHOR("Tomeu Vizoso");
diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h
new file mode 100644
index 000000000000..2c673bb99ccc
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DRV_H__
+#define __ROCKET_DRV_H__
+
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_device.h"
+
+extern const struct dev_pm_ops rocket_pm_ops;
+
+struct rocket_iommu_domain {
+ struct iommu_domain *domain;
+ struct kref kref;
+};
+
+struct rocket_file_priv {
+ struct rocket_device *rdev;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm mm;
+ struct mutex mm_lock;
+
+ struct drm_sched_entity sched_entity;
+};
+
+struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv);
+void rocket_iommu_domain_put(struct rocket_iommu_domain *domain);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c
new file mode 100644
index 000000000000..624c4ecf5a34
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_utils.h>
+#include <drm/rocket_accel.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+
+static void rocket_gem_bo_free(struct drm_gem_object *obj)
+{
+ struct rocket_gem_object *bo = to_rocket_bo(obj);
+ struct rocket_file_priv *rocket_priv = bo->driver_priv;
+ size_t unmapped;
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+
+ unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
+ drm_WARN_ON(obj->dev, unmapped != bo->size);
+
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&bo->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ rocket_iommu_domain_put(bo->domain);
+ bo->domain = NULL;
+
+ drm_gem_shmem_free(&bo->base);
+}
+
+static const struct drm_gem_object_funcs rocket_gem_funcs = {
+ .free = rocket_gem_bo_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct rocket_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &rocket_gem_funcs;
+
+ return &obj->base.base;
+}
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+ struct drm_rocket_create_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem_obj = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+
+ gem_obj = &shmem_obj->base;
+ rkt_obj = to_rocket_bo(gem_obj);
+
+ rkt_obj->driver_priv = rocket_priv;
+ rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
+ rkt_obj->size = args->size;
+ rkt_obj->offset = 0;
+
+ ret = drm_gem_handle_create(file, gem_obj, &args->handle);
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ goto err;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err;
+ }
+
+ mutex_lock(&rocket_priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
+ rkt_obj->size, PAGE_SIZE,
+ 0, 0);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ ret = iommu_map_sgtable(rocket_priv->domain->domain,
+ rkt_obj->mm.start,
+ shmem_obj->sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0 || ret < args->size) {
+ drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
+ ret, args->size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ /* iommu_map_sgtable might have aligned the size */
+ rkt_obj->size = ret;
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->dma_address = rkt_obj->mm.start;
+
+ return 0;
+
+err_remove_node:
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&rkt_obj->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+err:
+ drm_gem_shmem_object_free(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_prep_bo *args = data;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
+ long ret = 0;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+ else if (ret > 0)
+ ret = 0; /* don't return the remaining jiffies to userspace */
+
+ shmem_obj = &to_rocket_bo(gem_obj)->base;
+
+ dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_fini_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ rkt_obj = to_rocket_bo(gem_obj);
+ shmem_obj = &rkt_obj->base;
+
+ dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return 0;
+}
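
Usage note: together these ioctls give userspace a simple buffer lifecycle:
CREATE_BO allocates shmem pages, maps them into the file's IOMMU domain and
returns both an mmap offset and the NPU-visible DMA address; PREP_BO waits
for NPU writers and syncs the pages for CPU access; FINI_BO syncs them back
for device access. A hedged userspace sketch, assuming the uapi structs from
include/uapi/drm/rocket_accel.h and the usual DRM ioctl macro naming (error
handling trimmed):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/rocket_accel.h>

	static int rocket_bo_roundtrip(int fd)
	{
		struct drm_rocket_create_bo create = { .size = 4096 };
		struct drm_rocket_prep_bo prep = { 0 };
		struct drm_rocket_fini_bo fini = { 0 };

		if (ioctl(fd, DRM_IOCTL_ROCKET_CREATE_BO, &create))
			return -1;

		/* CPU access: wait for NPU writers, sync pages for the CPU. */
		prep.handle = create.handle;
		prep.timeout_ns = 1000000000;	/* 1 s */
		if (ioctl(fd, DRM_IOCTL_ROCKET_PREP_BO, &prep))
			return -1;

		/* ... read/write through an mmap() at create.offset ... */

		/* Hand the pages back to the device before the next job. */
		fini.handle = create.handle;
		return ioctl(fd, DRM_IOCTL_ROCKET_FINI_BO, &fini);
	}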
diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h
new file mode 100644
index 000000000000..240430334509
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_GEM_H__
+#define __ROCKET_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+
+struct rocket_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct rocket_file_priv *driver_priv;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm_node mm;
+ size_t size;
+ u32 offset;
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size);
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline
+struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base);
+}
+
+#endif
diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c
new file mode 100644
index 000000000000..acd606160dc9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/rocket_accel.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_core.h"
+#include "rocket_device.h"
+#include "rocket_drv.h"
+#include "rocket_job.h"
+#include "rocket_registers.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct rocket_job *
+to_rocket_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct rocket_job, base);
+}
+
+static const char *rocket_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "rocket";
+}
+
+static const char *rocket_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "rockchip-npu";
+}
+
+static const struct dma_fence_ops rocket_fence_ops = {
+ .get_driver_name = rocket_fence_get_driver_name,
+ .get_timeline_name = rocket_fence_get_timeline_name,
+};
+
+static struct dma_fence *rocket_fence_create(struct rocket_core *core)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock,
+ core->fence_context, ++core->emit_seqno);
+
+ return fence;
+}
+
+static int
+rocket_copy_tasks(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_rocket_job *job,
+ struct rocket_job *rjob)
+{
+ int ret = 0;
+
+ if (job->task_struct_size < sizeof(struct drm_rocket_task))
+ return -EINVAL;
+
+ rjob->task_count = job->task_count;
+
+ if (!rjob->task_count)
+ return 0;
+
+ rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL);
+ if (!rjob->tasks) {
+ drm_dbg(dev, "Failed to allocate task array\n");
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < rjob->task_count; i++) {
+ struct drm_rocket_task task = {0};
+
+ if (copy_from_user(&task,
+ u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
+ sizeof(task))) {
+ drm_dbg(dev, "Failed to copy incoming tasks\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (task.regcmd_count == 0) {
+ drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ rjob->tasks[i].regcmd = task.regcmd;
+ rjob->tasks[i].regcmd_count = task.regcmd_count;
+ }
+
+ return 0;
+
+fail:
+ kvfree(rjob->tasks);
+ return ret;
+}
+
+static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
+{
+ struct rocket_task *task;
+ unsigned int extra_bit;
+
+ /* Don't queue the job if a reset is in progress */
+ if (atomic_read(&core->reset.pending))
+ return;
+
+ /* GO ! */
+
+ task = &job->tasks[job->next_task_idx];
+ job->next_task_idx++;
+
+ rocket_pc_writel(core, BASE_ADDRESS, 0x1);
+
+ /* From rknpu, in the TRM this bit is marked as reserved */
+ extra_bit = 0x10000000 * core->index;
+ rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) |
+ CNA_S_POINTER_EXECUTER_PP_EN(1) |
+ CNA_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) |
+ CORE_S_POINTER_EXECUTER_PP_EN(1) |
+ CORE_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
+ rocket_pc_writel(core, REGISTER_AMOUNTS,
+ PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
+
+ rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1);
+
+ rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) |
+ PC_TASK_CON_TASK_COUNT_CLEAR(1) |
+ PC_TASK_CON_TASK_NUMBER(1) |
+ PC_TASK_CON_TASK_PP_EN(1));
+
+ rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0));
+
+ rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+
+ dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d", task->regcmd, core->index);
+}
+
+static int rocket_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct drm_sched_job *job,
+ bool is_write)
+{
+ int i, ret;
+
+ for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rocket_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int rocket_job_push(struct rocket_job *job)
+{
+ struct rocket_device *rdev = job->rdev;
+ struct drm_gem_object **bos;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *),
+ GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *));
+ memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *));
+
+ ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+ if (ret)
+ goto err;
+
+ scoped_guard(mutex, &rdev->sched_lock) {
+ drm_sched_job_arm(&job->base);
+
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false);
+ if (ret)
+ goto err_unlock;
+
+ ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true);
+ if (ret)
+ goto err_unlock;
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ drm_sched_entity_push_job(&job->base);
+ }
+
+ rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence);
+
+err_unlock:
+ drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+err:
+ kvfree(bos);
+
+ return ret;
+}
+
+static void rocket_job_cleanup(struct kref *ref)
+{
+ struct rocket_job *job = container_of(ref, struct rocket_job,
+ refcount);
+ unsigned int i;
+
+ rocket_iommu_domain_put(job->domain);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ if (job->in_bos) {
+ for (i = 0; i < job->in_bo_count; i++)
+ drm_gem_object_put(job->in_bos[i]);
+
+ kvfree(job->in_bos);
+ }
+
+ if (job->out_bos) {
+ for (i = 0; i < job->out_bo_count; i++)
+ drm_gem_object_put(job->out_bos[i]);
+
+ kvfree(job->out_bos);
+ }
+
+ kvfree(job->tasks);
+
+ kfree(job);
+}
+
+static void rocket_job_put(struct rocket_job *job)
+{
+ kref_put(&job->refcount, rocket_job_cleanup);
+}
+
+static void rocket_job_free(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ rocket_job_put(job);
+}
+
+static struct rocket_core *sched_to_core(struct rocket_device *rdev,
+ struct drm_gpu_scheduler *sched)
+{
+ unsigned int core;
+
+ for (core = 0; core < rdev->num_cores; core++) {
+ if (&rdev->cores[core].sched == sched)
+ return &rdev->cores[core];
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+ struct dma_fence *fence = NULL;
+ int ret;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ /*
+ * Nothing to execute: can happen if the job has finished while
+ * we were resetting the NPU.
+ */
+ if (job->next_task_idx == job->task_count)
+ return NULL;
+
+ fence = rocket_fence_create(core);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0)
+ return fence;
+
+ ret = iommu_attach_group(job->domain->domain, core->iommu_group);
+ if (ret < 0)
+ return fence;
+
+ scoped_guard(mutex, &core->job_lock) {
+ core->in_flight_job = job;
+ rocket_job_hw_submit(core, job);
+ }
+
+ return fence;
+}
+
+static void rocket_job_handle_irq(struct rocket_core *core)
+{
+ pm_runtime_mark_last_busy(core->dev);
+
+ rocket_pc_writel(core, OPERATION_ENABLE, 0x0);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff);
+
+ scoped_guard(mutex, &core->job_lock)
+ if (core->in_flight_job) {
+ if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) {
+ rocket_job_hw_submit(core, core->in_flight_job);
+ return;
+ }
+
+ iommu_detach_group(NULL, core->iommu_group);
+ dma_fence_signal(core->in_flight_job->done_fence);
+ pm_runtime_put_autosuspend(core->dev);
+ core->in_flight_job = NULL;
+ }
+}
+
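+/*
+ * Stop the scheduler, rebalance the PM and IOMMU state of any in-flight
+ * job, reset the core and restart the scheduler. Called from the timeout
+ * handler and from the ordered reset workqueue.
+ */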
+static void
+rocket_reset(struct rocket_core *core, struct drm_sched_job *bad)
+{
+ if (!atomic_read(&core->reset.pending))
+ return;
+
+ drm_sched_stop(&core->sched, bad);
+
+ /*
+ * Remaining interrupts have been handled, but we might still have
+ * stuck jobs. Let's make sure the PM counters stay balanced by
+ * manually calling pm_runtime_put_noidle().
+ */
+ scoped_guard(mutex, &core->job_lock) {
+ if (core->in_flight_job)
+ pm_runtime_put_noidle(core->dev);
+
+ iommu_detach_group(NULL, core->iommu_group);
+
+ core->in_flight_job = NULL;
+ }
+
+ /* Proceed with reset now. */
+ rocket_core_reset(core);
+
+ /* NPU has been reset, we can clear the reset pending bit. */
+ atomic_set(&core->reset.pending, 0);
+
+ /* Restart the scheduler */
+ drm_sched_start(&core->sched, 0);
+}
+
+static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+
+	dev_err(core->dev, "NPU job timed out\n");
+
+ atomic_set(&core->reset.pending, 1);
+ rocket_reset(core, sched_job);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void rocket_reset_work(struct work_struct *work)
+{
+ struct rocket_core *core;
+
+ core = container_of(work, struct rocket_core, reset.work);
+ rocket_reset(core, NULL);
+}
+
+static const struct drm_sched_backend_ops rocket_sched_ops = {
+ .run_job = rocket_job_run,
+ .timedout_job = rocket_job_timedout,
+ .free_job = rocket_job_free
+};
+
+static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data)
+{
+ struct rocket_core *core = data;
+
+ rocket_job_handle_irq(core);
+
+ return IRQ_HANDLED;
+}
+
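+/*
+ * Hard IRQ handler: flag DMA errors, bail out if no DPU interrupt is
+ * pending, otherwise mask further interrupts and defer the real work to
+ * the threaded handler.
+ */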
+static irqreturn_t rocket_job_irq_handler(int irq, void *data)
+{
+ struct rocket_core *core = data;
+ u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS);
+
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR);
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR);
+
+	if (!(raw_status & (PC_INTERRUPT_RAW_STATUS_DPU_0 |
+			    PC_INTERRUPT_RAW_STATUS_DPU_1)))
+		return IRQ_NONE;
+
+ rocket_pc_writel(core, INTERRUPT_MASK, 0x0);
+
+ return IRQ_WAKE_THREAD;
+}
+
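+/* Per-core one-time setup: job IRQ, ordered reset workqueue and scheduler. */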
+int rocket_job_init(struct rocket_core *core)
+{
+ struct drm_sched_init_args args = {
+ .ops = &rocket_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(core->dev),
+ .dev = core->dev,
+ };
+ int ret;
+
+ INIT_WORK(&core->reset.work, rocket_reset_work);
+ spin_lock_init(&core->fence_lock);
+ mutex_init(&core->job_lock);
+
+ core->irq = platform_get_irq(to_platform_device(core->dev), 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ ret = devm_request_threaded_irq(core->dev, core->irq,
+ rocket_job_irq_handler,
+ rocket_job_irq_handler_thread,
+ IRQF_SHARED, dev_name(core->dev),
+ core);
+ if (ret) {
+		dev_err(core->dev, "failed to request job IRQ\n");
+ return ret;
+ }
+
+ core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index);
+ if (!core->reset.wq)
+ return -ENOMEM;
+
+ core->fence_context = dma_fence_context_alloc(1);
+
+ args.timeout_wq = core->reset.wq;
+ ret = drm_sched_init(&core->sched, &args);
+ if (ret) {
+		dev_err(core->dev, "Failed to create scheduler: %d\n", ret);
+ goto err_sched;
+ }
+
+ return 0;
+
+err_sched:
+	/* drm_sched_init() failed, so there is no scheduler to tear down. */
+ destroy_workqueue(core->reset.wq);
+ return ret;
+}
+
+void rocket_job_fini(struct rocket_core *core)
+{
+ drm_sched_fini(&core->sched);
+
+ cancel_work_sync(&core->reset.work);
+ destroy_workqueue(core->reset.wq);
+}
+
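+/* Set up the per-client scheduler entity, load-balanced across all cores. */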
+int rocket_job_open(struct rocket_file_priv *rocket_priv)
+{
+ struct rocket_device *rdev = rocket_priv->rdev;
+ struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores,
+ sizeof(*scheds),
+ GFP_KERNEL);
+ unsigned int core;
+ int ret;
+
+	if (!scheds)
+		return -ENOMEM;
+
+	for (core = 0; core < rdev->num_cores; core++)
+ scheds[core] = &rdev->cores[core].sched;
+
+ ret = drm_sched_entity_init(&rocket_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ scheds,
+ rdev->num_cores, NULL);
+	if (WARN_ON(ret)) {
+		kfree(scheds);
+		return ret;
+	}
+
+ return 0;
+}
+
+void rocket_job_close(struct rocket_file_priv *rocket_priv)
+{
+ struct drm_sched_entity *entity = &rocket_priv->sched_entity;
+
+ kfree(entity->sched_list);
+ drm_sched_entity_destroy(entity);
+}
+
+int rocket_job_is_idle(struct rocket_core *core)
+{
+ /* If there are any jobs in this HW queue, we're not idle */
+ if (atomic_read(&core->sched.credit_count))
+ return false;
+
+ return true;
+}
+
+static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_rocket_job *job)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *file_priv = file->driver_priv;
+ struct rocket_job *rjob = NULL;
+ int ret = 0;
+
+ if (job->task_count == 0)
+ return -EINVAL;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ kref_init(&rjob->refcount);
+
+ rjob->rdev = rdev;
+
+ ret = drm_sched_job_init(&rjob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ret = rocket_copy_tasks(dev, file, job, rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles),
+ job->in_bo_handle_count, &rjob->in_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->in_bo_count = job->in_bo_handle_count;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles),
+ job->out_bo_handle_count, &rjob->out_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->out_bo_count = job->out_bo_handle_count;
+
+ rjob->domain = rocket_iommu_domain_get(file_priv);
+
+ ret = rocket_job_push(rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&rjob->base);
+out_put_job:
+ rocket_job_put(rjob);
+
+ return ret;
+}
+
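+/*
+ * Submit ioctl entry point. Jobs are copied in one at a time using the
+ * user-supplied job_struct_size as stride, so userspace built against a
+ * larger, newer version of struct drm_rocket_job keeps working.
+ */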
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_submit *args = data;
+ struct drm_rocket_job *jobs;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->job_count == 0)
+ return 0;
+
+ if (args->job_struct_size < sizeof(struct drm_rocket_job)) {
+ drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n");
+ return -EINVAL;
+ }
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs) {
+ drm_dbg(dev, "Failed to allocate incoming job array\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ if (copy_from_user(&jobs[i],
+ u64_to_user_ptr(args->jobs) + i * args->job_struct_size,
+ sizeof(*jobs))) {
+ ret = -EFAULT;
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ goto exit;
+ }
+ }
+
+	for (i = 0; i < args->job_count; i++) {
+		ret = rocket_ioctl_submit_job(dev, file, &jobs[i]);
+		if (ret)
+			goto exit;
+	}
+
+exit:
+ kvfree(jobs);
+
+ return ret;
+}
diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h
new file mode 100644
index 000000000000..4ae00feec3b9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_JOB_H__
+#define __ROCKET_JOB_H__
+
+#include <drm/drm_drv.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_core.h"
+#include "rocket_drv.h"
+
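+/* A single register command buffer and the number of commands in it. */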
+struct rocket_task {
+ u64 regcmd;
+ u32 regcmd_count;
+};
+
+struct rocket_job {
+ struct drm_sched_job base;
+
+ struct rocket_device *rdev;
+
+ struct drm_gem_object **in_bos;
+ struct drm_gem_object **out_bos;
+
+ u32 in_bo_count;
+ u32 out_bo_count;
+
+ struct rocket_task *tasks;
+ u32 task_count;
+ u32 next_task_idx;
+
+	/* Fence to be signaled by drm-sched once it's done with the job. */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct rocket_iommu_domain *domain;
+
+ struct kref refcount;
+};
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_job_init(struct rocket_core *core);
+void rocket_job_fini(struct rocket_core *core);
+int rocket_job_open(struct rocket_file_priv *rocket_priv);
+void rocket_job_close(struct rocket_file_priv *rocket_priv);
+int rocket_job_is_idle(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h
new file mode 100644
index 000000000000..9aef614c3470
--- /dev/null
+++ b/drivers/accel/rocket/rocket_registers.h
@@ -0,0 +1,4404 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __ROCKET_REGISTERS_XML__
+#define __ROCKET_REGISTERS_XML__
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+
+- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024)
+
+Copyright (C) 2024-2025 by the following authors:
+- Tomeu Vizoso <tomeu@tomeuvizoso.net>
+*/
+
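+/*
+ * Each register below gets a REG_* byte offset plus per-field __MASK and
+ * __SHIFT pairs with a packing helper. A hypothetical write composed from
+ * these helpers (field value illustrative only) would look like:
+ *
+ *   rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+ */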
+#define REG_PC_VERSION 0x00000000
+#define PC_VERSION_VERSION__MASK 0xffffffff
+#define PC_VERSION_VERSION__SHIFT 0
+static inline uint32_t PC_VERSION_VERSION(uint32_t val)
+{
+ return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK;
+}
+
+#define REG_PC_VERSION_NUM 0x00000004
+#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff
+#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0
+static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val)
+{
+ return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK;
+}
+
+#define REG_PC_OPERATION_ENABLE 0x00000008
+#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PC_BASE_ADDRESS 0x00000010
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4
+static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK;
+}
+#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e
+#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1
+static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK;
+}
+#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001
+#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0
+static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK;
+}
+
+#define REG_PC_REGISTER_AMOUNTS 0x00000014
+#define PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000
+#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16
+static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK;
+}
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0
+static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK;
+}
+
+#define REG_PC_INTERRUPT_MASK 0x00000020
+#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_MASK_PPU_1 0x00000800
+#define PC_INTERRUPT_MASK_PPU_0 0x00000400
+#define PC_INTERRUPT_MASK_DPU_1 0x00000200
+#define PC_INTERRUPT_MASK_DPU_0 0x00000100
+#define PC_INTERRUPT_MASK_CORE_1 0x00000080
+#define PC_INTERRUPT_MASK_CORE_0 0x00000040
+#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_CLEAR 0x00000024
+#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800
+#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400
+#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200
+#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100
+#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080
+#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040
+#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_STATUS 0x00000028
+#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_TASK_CON 0x00000030
+#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000
+#define PC_TASK_CON_RESERVED_0__SHIFT 14
+static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK;
+}
+#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000
+#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13
+static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK;
+}
+#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000
+#define PC_TASK_CON_TASK_PP_EN__SHIFT 12
+static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK;
+}
+#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff
+#define PC_TASK_CON_TASK_NUMBER__SHIFT 0
+static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK;
+}
+
+#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK;
+}
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PC_TASK_STATUS 0x0000003c
+#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000
+#define PC_TASK_STATUS_RESERVED_0__SHIFT 28
+static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK;
+}
+#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff
+#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0
+static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK;
+}
+
+#define REG_CNA_S_STATUS 0x00001000
+#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CNA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CNA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK;
+}
+#define CNA_S_STATUS_STATUS_1__MASK 0x00030000
+#define CNA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK;
+}
+#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CNA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK;
+}
+#define CNA_S_STATUS_STATUS_0__MASK 0x00000003
+#define CNA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CNA_S_POINTER 0x00001004
+#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CNA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK;
+}
+#define CNA_S_POINTER_EXECUTER__MASK 0x00010000
+#define CNA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK;
+}
+#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CNA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER__MASK 0x00000001
+#define CNA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CNA_OPERATION_ENABLE 0x00001008
+#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CNA_CONV_CON1 0x0000100c
+#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON1_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000
+#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30
+static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK;
+}
+#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000
+#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29
+static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000
+#define CNA_CONV_CON1_RESERVED_1__SHIFT 17
+static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON1_DECONV__MASK 0x00010000
+#define CNA_CONV_CON1_DECONV__SHIFT 16
+static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK;
+}
+#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000
+#define CNA_CONV_CON1_ARGB_IN__SHIFT 12
+static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00
+#define CNA_CONV_CON1_RESERVED_2__SHIFT 10
+static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380
+#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7
+static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070
+#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4
+static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f
+#define CNA_CONV_CON1_CONV_MODE__SHIFT 0
+static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK;
+}
+
+#define REG_CNA_CONV_CON2 0x00001010
+#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000
+#define CNA_CONV_CON2_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000
+#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16
+static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000
+#define CNA_CONV_CON2_RESERVED_1__SHIFT 14
+static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0
+#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4
+static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008
+#define CNA_CONV_CON2_RESERVED_2__SHIFT 3
+static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004
+#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2
+static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK;
+}
+#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002
+#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1
+static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK;
+}
+#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001
+#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0
+static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK;
+}
+
+#define REG_CNA_CONV_CON3 0x00001014
+#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON3_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000
+#define CNA_CONV_CON3_NN_MODE__SHIFT 28
+static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000
+#define CNA_CONV_CON3_RESERVED_1__SHIFT 26
+static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21
+static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000
+#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16
+static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000
+#define CNA_CONV_CON3_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11
+static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700
+#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8
+static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0
+#define CNA_CONV_CON3_RESERVED_3__SHIFT 6
+static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK;
+}
+#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038
+#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3
+static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007
+#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0
+static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK;
+}
+
+#define REG_CNA_DATA_SIZE0 0x00001020
+#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000
+#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK;
+}
+#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK;
+}
+
+#define REG_CNA_DATA_SIZE1 0x00001024
+#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000
+#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK;
+}
+
+#define REG_CNA_DATA_SIZE2 0x00001028
+#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800
+#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CNA_DATA_SIZE3 0x0000102c
+#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000
+#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000
+#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22
+static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK;
+}
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE0 0x00001030
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE1 0x00001034
+#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000
+#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19
+static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE2 0x00001038
+#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000
+#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000
+#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000
+#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK;
+}
+
+#define REG_CNA_CBUF_CON0 0x00001040
+#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000
+#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000
+#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12
+static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800
+#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK;
+}
+#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700
+#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8
+static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0
+#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK;
+}
+#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f
+#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0
+static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK;
+}
+
+#define REG_CNA_CBUF_CON1 0x00001044
+#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff
+#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0
+static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK;
+}
+
+#define REG_CNA_CVT_CON0 0x0000104c
+#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000
+#define CNA_CVT_CON0_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK;
+}
+#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008
+#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3
+static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK;
+}
+#define CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004
+#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2
+static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002
+#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1
+static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001
+#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0
+static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK;
+}
+
+#define REG_CNA_CVT_CON1 0x00001050
+#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000
+#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16
+static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK;
+}
+#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff
+#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0
+static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK;
+}
+
+#define REG_CNA_CVT_CON2 0x00001054
+#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000
+#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16
+static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK;
+}
+#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff
+#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0
+static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK;
+}
+
+#define REG_CNA_CVT_CON3 0x00001058
+#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000
+#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16
+static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK;
+}
+#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff
+#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0
+static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK;
+}
+
+#define REG_CNA_CVT_CON4 0x0000105c
+#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000
+#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16
+static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK;
+}
+#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff
+#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0
+static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK;
+}
+
+#define REG_CNA_FC_CON0 0x00001060
+#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000
+#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16
+static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK;
+}
+#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe
+#define CNA_FC_CON0_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK;
+}
+#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001
+#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0
+static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK;
+}
+
+#define REG_CNA_FC_CON1 0x00001064
+#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON1_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK;
+}
+#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK;
+}
+
+#define REG_CNA_PAD_CON0 0x00001068
+#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00
+#define CNA_PAD_CON0_RESERVED_0__SHIFT 8
+static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK;
+}
+#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0
+#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4
+static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK;
+}
+#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f
+#define CNA_PAD_CON0_PAD_TOP__SHIFT 0
+static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK;
+}
+
+#define REG_CNA_FEATURE_DATA_ADDR 0x00001070
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0
+static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val)
+{
+ return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK;
+}
+
+#define REG_CNA_FC_CON2 0x00001074
+#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON2_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK;
+}
+#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK;
+}
+
+#define REG_CNA_DMA_CON0 0x00001078
+#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000
+#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31
+static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000
+#define CNA_DMA_CON0_RESERVED_0__SHIFT 20
+static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16
+static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0
+#define CNA_DMA_CON0_RESERVED_1__SHIFT 4
+static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK;
+}
+#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f
+#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0
+static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK;
+}
+
+#define REG_CNA_DMA_CON1 0x0000107c
+#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON1_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK;
+}
+
+#define REG_CNA_DMA_CON2 0x00001080
+#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON2_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE0 0x00001084
+#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000
+#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK;
+}
+#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE1 0x00001088
+#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000
+#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK;
+}
+
+#define REG_CNA_CLK_GATE 0x00001090
+#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0
+#define CNA_CLK_GATE_RESERVED_0__SHIFT 5
+static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK;
+}
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4
+static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008
+#define CNA_CLK_GATE_RESERVED_1__SHIFT 3
+static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK;
+}
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2
+static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1
+static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0
+static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK;
+}
+
+#define REG_CNA_DCOMP_CTRL 0x00001100
+#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0
+#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4
+static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK;
+}
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3
+static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK;
+}
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0
+static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK;
+}
+
+#define REG_CNA_DCOMP_REGNUM 0x00001104
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0
+static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK;
+}
+
+#define REG_CNA_DCOMP_ADDR0 0x00001110
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0
+static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT0 0x00001140
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT1 0x00001144
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT2 0x00001148
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT3 0x0000114c
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT4 0x00001150
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT5 0x00001154
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT6 0x00001158
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT7 0x0000115c
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT8 0x00001160
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT9 0x00001164
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT10 0x00001168
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT11 0x0000116c
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT12 0x00001170
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT13 0x00001174
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT14 0x00001178
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT15 0x0000117c
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK;
+}
+
+#define REG_CNA_CVT_CON5 0x00001180
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0
+static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK;
+}
+
+#define REG_CNA_PAD_CON1 0x00001184
+#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff
+#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0
+static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK;
+}
+
+#define REG_CORE_S_STATUS 0x00003000
+#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CORE_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK;
+}
+#define CORE_S_STATUS_STATUS_1__MASK 0x00030000
+#define CORE_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK;
+}
+#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CORE_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK;
+}
+#define CORE_S_STATUS_STATUS_0__MASK 0x00000003
+#define CORE_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CORE_S_POINTER 0x00003004
+#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CORE_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK;
+}
+#define CORE_S_POINTER_EXECUTER__MASK 0x00010000
+#define CORE_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK;
+}
+#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CORE_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER__MASK 0x00000001
+#define CORE_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CORE_OPERATION_ENABLE 0x00003008
+#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CORE_MAC_GATING 0x0000300c
+#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000
+#define CORE_MAC_GATING_RESERVED_0__SHIFT 27
+static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK;
+}
+#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff
+#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0
+static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK;
+}
+
+#define REG_CORE_MISC_CFG 0x00003010
+#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000
+#define CORE_MISC_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK;
+}
+#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000
+#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14
+static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800
+#define CORE_MISC_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK;
+}
+#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700
+#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8
+static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc
+#define CORE_MISC_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK;
+}
+#define CORE_MISC_CFG_DW_EN__MASK 0x00000002
+#define CORE_MISC_CFG_DW_EN__SHIFT 1
+static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK;
+}
+#define CORE_MISC_CFG_QD_EN__MASK 0x00000001
+#define CORE_MISC_CFG_QD_EN__SHIFT 0
+static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK;
+}
+
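A minimal usage sketch, assuming only the definitions above: each generated helper shifts its argument into position and ANDs it with the field mask, so an out-of-range value is truncated instead of spilling into a neighbouring field, and a full register word is simply the bitwise OR of the per-field helpers. The function name below is illustrative, not from this patch.

#include <stdbool.h>
#include <stdint.h>

static uint32_t core_misc_cfg_word(uint32_t precision, bool dw_en, bool qd_en)
{
	/* Each helper masks its input, so the ORed fields cannot
	 * produce overlapping bits. */
	return CORE_MISC_CFG_PROC_PRECISION(precision) |
	       CORE_MISC_CFG_DW_EN(dw_en) |
	       CORE_MISC_CFG_QD_EN(qd_en);
}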
+#define REG_CORE_DATAOUT_SIZE_0 0x00003014
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK;
+}
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_1 0x00003018
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK;
+}
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK;
+}
+
+#define REG_CORE_CLIP_TRUNCATE 0x0000301c
+#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80
+#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK;
+}
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6
+static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK;
+}
+#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020
+#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK;
+}
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0
+static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK;
+}
+
+#define REG_DPU_S_STATUS 0x00004000
+#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_S_POINTER 0x00004004
+#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK;
+}
+
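The *_S_POINTER layout, a POINTER/EXECUTER bit pair plus PP_EN/PP_MODE/PP_CLEAR controls, resembles the ping-pong register-group scheme of NVDLA-derived NPUs, where software programs one shadow group while the hardware executes the other. A hedged sketch under that assumption; npu_write and the exact field semantics are assumptions, not taken from this patch.

#include <stdint.h>

/* Assumed MMIO accessor; the driver's real helper is not in this header. */
static inline void npu_write(volatile uint32_t *base, uint32_t reg, uint32_t val)
{
	base[reg / 4] = val;
}

/* Enable ping-pong mode and point the programming interface at group 1
 * while group 0 executes (field meanings assumed, see above). */
static void dpu_point_at_group1(volatile uint32_t *base)
{
	npu_write(base, REG_DPU_S_POINTER,
		  DPU_S_POINTER_POINTER_PP_EN(1) |
		  DPU_S_POINTER_EXECUTER_PP_EN(1) |
		  DPU_S_POINTER_POINTER(1));
}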
+#define REG_DPU_OPERATION_ENABLE 0x00004008
+#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_FEATURE_MODE_CFG 0x0000400c
+#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000
+#define DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31
+static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000
+#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30
+static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26
+static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000
+#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25
+static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9
+static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5
+static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3
+static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1
+static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_DATA_FORMAT 0x00004010
+#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000
+#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29
+static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000
+#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26
+static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16
+static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10
+static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4
+static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008
+#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3
+static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK;
+}
+#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_DPU_OFFSET_PEND 0x00004014
+#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000
+#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK;
+}
+#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff
+#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0
+static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK;
+}
+
+#define REG_DPU_DST_BASE_ADDR 0x00004020
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_DST_SURF_STRIDE 0x00004024
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_WIDTH 0x00004030
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_BS_CFG 0x00004040
+#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BS_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000
+#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK;
+}
+#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BS_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100
+#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK;
+}
+#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080
+#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK;
+}
+#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040
+#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020
+#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010
+#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK;
+}
+#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BS_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002
+#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001
+#define DPU_BS_CFG_BS_BYPASS__SHIFT 0
+static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK;
+}
+
+#define REG_DPU_BS_ALU_CFG 0x00004044
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BS_MUL_CFG 0x00004048
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_BS_OW_CFG 0x00004050
+#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000
+#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28
+static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK;
+}
+#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000
+#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27
+static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK;
+}
+#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800
+#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11
+static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700
+#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0
+#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c
+#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK;
+}
+#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002
+#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK;
+}
+#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001
+#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0
+static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK;
+}
+
+#define REG_DPU_BS_OW_OP 0x00004054
+#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000
+#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff
+#define DPU_BS_OW_OP_OW_OP__SHIFT 0
+static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & DPU_BS_OW_OP_OW_OP__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_0 0x00004058
+#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000
+#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000
+#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27
+static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK;
+}
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_1 0x0000405c
+#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000
+#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK;
+}
+
+#define REG_DPU_BN_CFG 0x00004060
+#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BN_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000
+#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK;
+}
+#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BN_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100
+#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK;
+}
+#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080
+#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK;
+}
+#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040
+#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020
+#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010
+#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK;
+}
+#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BN_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002
+#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001
+#define DPU_BN_CFG_BN_BYPASS__SHIFT 0
+static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK;
+}
+
+#define REG_DPU_BN_ALU_CFG 0x00004064
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BN_MUL_CFG 0x00004068
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_EW_CFG 0x00004070
+#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000
+#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000
+#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK;
+}
+#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000
+#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28
+static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK;
+}
+#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000
+#define DPU_EW_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK;
+}
+#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000
+#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22
+static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK;
+}
+#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000
+#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21
+static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK;
+}
+#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000
+#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20
+static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK;
+}
+#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000
+#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK;
+}
+#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800
+#define DPU_EW_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK;
+}
+#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400
+#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10
+static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK;
+}
+#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200
+#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9
+static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8
+static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080
+#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7
+static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040
+#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6
+static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK;
+}
+#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020
+#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK;
+}
+#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018
+#define DPU_EW_CFG_RESERVED_2__SHIFT 3
+static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK;
+}
+#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004
+#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2
+static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002
+#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1
+static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001
+#define DPU_EW_CFG_EW_BYPASS__SHIFT 0
+static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK;
+}
+
+#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK;
+}
+
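REG_DPU_EW_CVT_SCALE_VALUE packs the element-wise converter's requantisation parameters into one word: a 16-bit scale, a 6-bit shift and a 10-bit truncate amount (widths read off the masks above). A small sketch of the packing only; the arithmetic the hardware applies to these fields is not described by this header, and the helper name is illustrative.

#include <stdint.h>

static uint32_t dpu_ew_cvt_scale_word(uint16_t scale, uint32_t shift,
				      uint32_t truncate)
{
	/* scale: bits 15:0, shift: bits 21:16, truncate: bits 31:22. */
	return DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(scale) |
	       DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(shift) |
	       DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(truncate);
}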
+#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_OUT_CVT_OFFSET 0x00004080
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SCALE 0x00004084
+#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000
+#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16
+static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK;
+}
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SHIFT 0x00004088
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12
+static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_0 0x00004090
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_1 0x00004094
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_2 0x00004098
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_3 0x0000409c
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_4 0x000040a0
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_5 0x000040a4
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_6 0x000040a8
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_7 0x000040ac
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK;
+}
+
+#define REG_DPU_SURFACE_ADD 0x000040c0
+#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0
+#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4
+static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK;
+}
+#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f
+#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_CFG 0x00004100
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_DATA 0x00004104
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK;
+}
+
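REG_DPU_LUT_ACCESS_CFG selects a table (LUT_TABLE_ID), an entry index (LUT_ADDR, 10 bits) and an access direction (LUT_ACCESS_TYPE), while REG_DPU_LUT_ACCESS_DATA carries the 16-bit entry itself. A hedged sketch of loading a table under two assumptions this header does not confirm: that LUT_ACCESS_TYPE = 1 means write, and that LUT_ADDR does not auto-increment, so the config word is rewritten per entry.

#include <stddef.h>
#include <stdint.h>

/* Assumed MMIO accessor, as in the earlier sketch. */
static inline void npu_write(volatile uint32_t *base, uint32_t reg, uint32_t val)
{
	base[reg / 4] = val;
}

static void dpu_load_lut(volatile uint32_t *base, uint32_t table_id,
			 const uint16_t *entries, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		npu_write(base, REG_DPU_LUT_ACCESS_CFG,
			  DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(1) | /* assumed: write */
			  DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(table_id) |
			  DPU_LUT_ACCESS_CFG_LUT_ADDR(i));
		npu_write(base, REG_DPU_LUT_ACCESS_DATA,
			  DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(entries[i]));
	}
}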
+#define REG_DPU_LUT_CFG 0x00004108
+#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00
+#define DPU_LUT_CFG_RESERVED_0__SHIFT 8
+static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080
+#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7
+static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK;
+}
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6
+static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5
+static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4
+static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2
+static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK;
+}
+#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002
+#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1
+static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK;
+}
+#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001
+#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0
+static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK;
+}
+
+#define REG_DPU_LUT_INFO 0x0000410c
+#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000
+#define DPU_LUT_INFO_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK;
+}
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16
+static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8
+static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff
+#define DPU_LUT_INFO_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_LUT_INFO_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK;
+}
+
+#define REG_DPU_LUT_LE_START 0x00004110
+#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff
+#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0
+static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK;
+}
+
+#define REG_DPU_LUT_LE_END 0x00004114
+#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff
+#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0
+static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK;
+}
+
+#define REG_DPU_LUT_LO_START 0x00004118
+#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff
+#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0
+static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK;
+}
+
+#define REG_DPU_LUT_LO_END 0x0000411c
+#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff
+#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0
+static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK;
+}
+
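+/*
+ * DPU_RDMA block (0x5000 onwards): judging by the register names,
+ * the read-DMA engine that fetches source and operand data
+ * (BRDMA/NRDMA/ERDMA sub-channels) for the DPU.
+ */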
+#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK;
+}
+
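+/*
+ * PPU block (0x6000 onwards): its POOLING_* registers below
+ * configure the pooling kernel, strides and padding applied to the
+ * input data cube.
+ */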
+#define REG_PPU_S_STATUS 0x00006000
+#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_S_POINTER 0x00006004
+#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_OPERATION_ENABLE 0x00006008
+#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK;
+}
+
+#define REG_PPU_OPERATION_MODE_CFG 0x00006024
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30
+static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16
+static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0
+#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5
+static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4
+static inline uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0
+static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK;
+}
+
+#define REG_PPU_POOLING_KERNEL_CFG 0x00006034
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK;
+}
+
+#define REG_PPU_POOLING_PADDING_CFG 0x00006040
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK;
+}
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK;
+}
+
+#define REG_PPU_DST_BASE_ADDR 0x00006070
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4
+static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DST_SURF_STRIDE 0x0000607c
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DATA_FORMAT 0x00006084
+#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0
+#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4
+static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK;
+}
+#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008
+#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3
+static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK;
+}
+#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_PPU_MISC_CTRL 0x000060dc
+#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000
+#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16
+static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00
+#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9
+static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK;
+}
+#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100
+#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8
+static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & PPU_MISC_CTRL_MC_SURF_OUT__MASK;
+}
+#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080
+#define PPU_MISC_CTRL_NONALIGN__SHIFT 7
+static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070
+#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4
+static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK;
+}
+#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f
+#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0
+static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK;
+}
+
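+/*
+ * PPU_RDMA block (0x7000 onwards): by its name, the read-DMA front
+ * end that feeds the PPU.
+ */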
+#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK;
+}
+
+#define REG_DDMA_CFG_OUTSTANDING 0x00008000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_0 0x00008004
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_DDMA_WR_WEIGHT_0 0x00008008
+#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_DDMA_CFG_ID_ERROR 0x0000800c
+#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_1 0x00008010
+#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_ARB 0x00008018
+#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_DDMA_CFG_STATUS 0x00008030
+#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define DDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK;
+}
+#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_SDMA_CFG_OUTSTANDING 0x00009000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_0 0x00009004
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_SDMA_WR_WEIGHT_0 0x00009008
+#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_SDMA_CFG_ID_ERROR 0x0000900c
+#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_1 0x00009010
+#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_ARB 0x00009018
+#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_SDMA_CFG_STATUS 0x00009030
+#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define SDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK;
+}
+#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK;
+}
+
+#endif /* __ROCKET_REGISTERS_XML__ */
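
For orientation, the generated helpers above all follow one pattern: each register gets a REG_* offset, and each field gets a mask/shift pair plus an inline packer that shifts a value into place and clamps it to the field. A minimal sketch of how they compose into a register write — the `base` MMIO mapping and the helper name are hypothetical; only the macros come from the header above:

#include <linux/io.h>

/* Illustrative only: program the DDMA read/write outstanding counts.
 * OR-ing the packers yields the full register word because each one
 * masks its result to its own field.
 */
static void rkt_ddma_set_outstanding(void __iomem *base, u8 rd, u8 wr)
{
	u32 val = DDMA_CFG_OUTSTANDING_RD_OS_CNT(rd) |
		  DDMA_CFG_OUTSTANDING_WR_OS_CNT(wr);

	writel(val, base + REG_DDMA_CFG_OUTSTANDING);
}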
diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile
index 6f6a83565c0d..14ba1cca87f4 100644
--- a/drivers/accessibility/speakup/Makefile
+++ b/drivers/accessibility/speakup/Makefile
@@ -40,9 +40,7 @@ hostprogs += makemapdata
makemapdata-objs := makemapdata.o
quiet_cmd_mkmap = MKMAP $@
- cmd_mkmap = TOPDIR=$(srctree) \
- SPKDIR=$(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD),$(srctree)/drivers/accessibility/speakup) \
- $(obj)/makemapdata > $@
+ cmd_mkmap = TOPDIR=$(srctree) SPKDIR=$(src) $(obj)/makemapdata > $@
$(obj)/mapdata.h: $(obj)/makemapdata
$(call cmd,mkmap)
diff --git a/drivers/accessibility/speakup/devsynth.c b/drivers/accessibility/speakup/devsynth.c
index cb7e1114e8eb..e3d909bd0480 100644
--- a/drivers/accessibility/speakup/devsynth.c
+++ b/drivers/accessibility/speakup/devsynth.c
@@ -39,13 +39,13 @@ static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
static ssize_t speakup_file_writeu(struct file *fp, const char __user *buffer,
size_t nbytes, loff_t *ppos)
{
- size_t count = nbytes, want;
+ size_t count = nbytes, consumed, want;
const char __user *ptr = buffer;
size_t bytes;
unsigned long flags;
unsigned char buf[256];
u16 ubuf[256];
- size_t in, in2, out;
+ size_t in, out;
if (!synth)
return -ENODEV;
@@ -58,57 +58,24 @@ static ssize_t speakup_file_writeu(struct file *fp, const char __user *buffer,
return -EFAULT;
/* Convert to u16 */
- for (in = 0, out = 0; in < bytes; in++) {
- unsigned char c = buf[in];
- int nbytes = 8 - fls(c ^ 0xff);
- u32 value;
-
- switch (nbytes) {
- case 8: /* 0xff */
- case 7: /* 0xfe */
- case 1: /* 0x80 */
- /* Invalid, drop */
- goto drop;
-
- case 0:
- /* ASCII, copy */
- ubuf[out++] = c;
- continue;
+ for (in = 0, out = 0; in < bytes; in += consumed) {
+ s32 value;
- default:
- /* 2..6-byte UTF-8 */
+ value = synth_utf8_get(buf + in, bytes - in, &consumed, &want);
+ if (value == -1) {
+ /* Invalid or incomplete */
- if (bytes - in < nbytes) {
+ if (want > bytes - in)
/* We don't have it all yet, stop here
* and wait for the rest
*/
bytes = in;
- want = nbytes;
- continue;
- }
-
- /* First byte */
- value = c & ((1u << (7 - nbytes)) - 1);
-
- /* Other bytes */
- for (in2 = 2; in2 <= nbytes; in2++) {
- c = buf[in + 1];
- if ((c & 0xc0) != 0x80) {
- /* Invalid, drop the head */
- want = 1;
- goto drop;
- }
- value = (value << 6) | (c & 0x3f);
- in++;
- }
-
- if (value < 0x10000)
- ubuf[out++] = value;
- want = 1;
- break;
+
+ continue;
}
-drop:
- /* empty statement */;
+
+ if (value < 0x10000)
+ ubuf[out++] = value;
}
count -= bytes;
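
The rewritten loop above delegates decoding to synth_utf8_get() (added in synth.c further down) and relies on its consumed/want contract to handle a multi-byte sequence split across write() calls: when want exceeds the bytes still buffered, `bytes` is truncated so the partial tail counts as unconsumed and is re-read on the next write. A small sketch of that contract, assuming only the helper's documented behaviour:

/* Return the length of the longest prefix of buf that contains only
 * complete UTF-8 sequences; a caller would carry the remainder over
 * to the next write. Sketch only, built on synth_utf8_get().
 */
static size_t utf8_complete_prefix(const char *buf, size_t bytes)
{
	size_t in, consumed, want;

	for (in = 0; in < bytes; in += consumed) {
		if (synth_utf8_get(buf + in, bytes - in, &consumed, &want) == -1 &&
		    want > bytes - in)
			return in;	/* incomplete tail starts here */
		/* invalid bytes (consumed >= 1) are simply skipped */
	}
	return bytes;
}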
diff --git a/drivers/accessibility/speakup/genmap.c b/drivers/accessibility/speakup/genmap.c
index 0125000e00d9..0882bab10fb8 100644
--- a/drivers/accessibility/speakup/genmap.c
+++ b/drivers/accessibility/speakup/genmap.c
@@ -10,7 +10,6 @@
#include <stdio.h>
#include <libgen.h>
#include <string.h>
-#include <linux/version.h>
#include <ctype.h>
#include "utils.h"
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 736c2eb8c0f3..e68cf1d83787 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
@@ -1172,13 +1172,13 @@ static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
if (cursor_track == read_all_mode) {
switch (value) {
case KVAL(K_SHIFT):
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
read_all_doc(vc);
break;
case KVAL(K_CTRL):
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
spk_do_flush();
@@ -1399,7 +1399,7 @@ static void start_read_all_timer(struct vc_data *vc, enum read_all_command comma
static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command)
{
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
speakup_fake_down_arrow();
start_read_all_timer(vc, command);
}
@@ -1415,7 +1415,7 @@ static void read_all_doc(struct vc_data *vc)
cursor_track = read_all_mode;
spk_reset_index_count(0);
if (get_sentence_buf(vc, 0) == -1) {
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
if (!in_keyboard_notifier)
speakup_fake_down_arrow();
start_read_all_timer(vc, RA_DOWN_ARROW);
@@ -1428,7 +1428,7 @@ static void read_all_doc(struct vc_data *vc)
static void stop_read_all(struct vc_data *vc)
{
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
spk_do_flush();
@@ -1528,7 +1528,7 @@ static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
start_read_all_timer(vc, value + 1);
@@ -1692,7 +1692,7 @@ static void cursor_done(struct timer_list *unused)
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_con != fg_console) {
is_cursor = 0;
@@ -2333,7 +2333,7 @@ static void __exit speakup_exit(void)
speakup_unregister_devsynth();
speakup_cancel_selection();
speakup_cancel_paste();
- del_timer_sync(&cursor_timer);
+ timer_delete_sync(&cursor_timer);
kthread_stop(speakup_task);
speakup_task = NULL;
mutex_lock(&spk_mutex);
@@ -2437,7 +2437,7 @@ error_task:
error_vtnotifier:
unregister_keyboard_notifier(&keyboard_notifier_block);
- del_timer(&cursor_timer);
+ timer_delete(&cursor_timer);
error_kbdnotifier:
speakup_unregister_devsynth();
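
The del_timer()/del_timer_sync() calls replaced throughout this file are a mechanical rename to the current timer API; semantics are unchanged. As a reminder of the distinction (demo_timer is a stand-in):

#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_stop(bool may_sleep)
{
	if (may_sleep)
		timer_delete_sync(&demo_timer);	/* also waits for a running handler */
	else
		timer_delete(&demo_timer);	/* deactivate only; safe in atomic context */
}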
diff --git a/drivers/accessibility/speakup/makemapdata.c b/drivers/accessibility/speakup/makemapdata.c
index d7d41bb9b05f..55e4ef8a93dc 100644
--- a/drivers/accessibility/speakup/makemapdata.c
+++ b/drivers/accessibility/speakup/makemapdata.c
@@ -10,7 +10,6 @@
#include <stdio.h>
#include <libgen.h>
#include <string.h>
-#include <linux/version.h>
#include <ctype.h>
#include "utils.h"
diff --git a/drivers/accessibility/speakup/speakup.h b/drivers/accessibility/speakup/speakup.h
index 364fde99749e..54f1226ea061 100644
--- a/drivers/accessibility/speakup/speakup.h
+++ b/drivers/accessibility/speakup/speakup.h
@@ -76,7 +76,9 @@ int speakup_paste_selection(struct tty_struct *tty);
void speakup_cancel_paste(void);
void speakup_register_devsynth(void);
void speakup_unregister_devsynth(void);
+s32 synth_utf8_get(const char *buf, size_t count, size_t *consumed, size_t *want);
void synth_write(const char *buf, size_t count);
+void synth_writeu(const char *buf, size_t count);
int synth_supports_indexing(void);
extern struct vc_data *spk_sel_cons;
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index 45f906103133..d8addbf3ad0d 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -217,10 +217,95 @@ void synth_write(const char *_buf, size_t count)
synth_start();
}
+/* Consume one UTF-8 character from buf (which contains up to count bytes)
+ * and return the Unicode codepoint if valid, -1 otherwise.
+ * In all cases, return the number of consumed bytes in *consumed,
+ * and the minimum number of bytes that would be needed for the next
+ * character in *want.
+ */
+s32 synth_utf8_get(const char *buf, size_t count, size_t *consumed, size_t *want)
+{
+ unsigned char c = buf[0];
+ int nbytes = 8 - fls(c ^ 0xff);
+ u32 value;
+ size_t i;
+
+ switch (nbytes) {
+ case 8: /* 0xff */
+ case 7: /* 0xfe */
+ case 1: /* 0x80 */
+ /* Invalid, drop */
+ *consumed = 1;
+ *want = 1;
+ return -1;
+
+ case 0:
+ /* ASCII, take as such */
+ *consumed = 1;
+ *want = 1;
+ return c;
+
+ default:
+ /* 2..6-byte UTF-8 */
+
+ if (count < nbytes) {
+ /* We don't have it all */
+ *consumed = 0;
+ *want = nbytes;
+ return -1;
+ }
+
+ /* First byte */
+ value = c & ((1u << (7 - nbytes)) - 1);
+
+ /* Other bytes */
+ for (i = 1; i < nbytes; i++) {
+ c = buf[i];
+ if ((c & 0xc0) != 0x80) {
+ /* Invalid, drop the head */
+ *consumed = i;
+ *want = 1;
+ return -1;
+ }
+ value = (value << 6) | (c & 0x3f);
+ }
+
+ *consumed = nbytes;
+ *want = 1;
+ return value;
+ }
+}
+
+void synth_writeu(const char *buf, size_t count)
+{
+ size_t i, consumed, want;
+
+ /* Convert to u16 */
+ for (i = 0; i < count; i++) {
+ s32 value;
+
+ value = synth_utf8_get(buf + i, count - i, &consumed, &want);
+ if (value == -1) {
+ /* Invalid or incomplete */
+
+ if (want > count - i)
+ /* We don't have it all, stop */
+ count = i;
+
+ continue;
+ }
+
+ if (value < 0x10000)
+ synth_buffer_add(value);
+ }
+
+ synth_start();
+}
+
void synth_printf(const char *fmt, ...)
{
va_list args;
- unsigned char buf[160], *p;
+ unsigned char buf[160];
int r;
va_start(args, fmt);
@@ -229,10 +314,7 @@ void synth_printf(const char *fmt, ...)
if (r > sizeof(buf) - 1)
r = sizeof(buf) - 1;
- p = buf;
- while (r--)
- synth_buffer_add(*p++);
- synth_start();
+ synth_writeu(buf, r);
}
EXPORT_SYMBOL_GPL(synth_printf);
@@ -439,7 +521,7 @@ void synth_release(void)
spin_lock_irqsave(&speakup_info.spinlock, flags);
pr_info("releasing synth %s\n", synth->name);
synth->alive = 0;
- del_timer(&thread_timer);
+ timer_delete(&thread_timer);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->attributes.name)
sysfs_remove_group(speakup_kobj, &synth->attributes);
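
A worked example of the decoder added above, on the string "a\xc3\xa9" ("aé"): the ASCII byte is returned as-is, and the two-byte sequence folds into U+00E9 via value = ((0xc3 & 0x1f) << 6) | (0xa9 & 0x3f). Sketch only:

static void synth_utf8_demo(void)
{
	const char s[] = "a\xc3\xa9";	/* "aé" */
	size_t consumed, want;
	s32 v;

	v = synth_utf8_get(s, sizeof(s) - 1, &consumed, &want);
	/* v == 0x61 ('a'), consumed == 1, want == 1 */

	v = synth_utf8_get(s + 1, sizeof(s) - 2, &consumed, &want);
	/* v == 0xe9 (U+00E9), consumed == 2: 0xc3 contributes the top
	 * 5 bits (0x03), 0xa9 the low 6 bits (0x29)
	 */
	(void)v;
}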
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e3a7c2aedd5f..ca00a5dbcf75 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -132,8 +132,17 @@ config ACPI_REV_OVERRIDE_POSSIBLE
makes it possible to force the kernel to return "5" as the supported
ACPI revision via the "acpi_rev_override" command line switch.
+config ACPI_EC
+ bool "Embedded Controller"
+ depends on HAS_IOPORT
+ default X86 || LOONGARCH
+ help
+ This driver handles communication with the microcontroller
+ on many x86/LoongArch laptops and other machines.
+
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
+ depends on ACPI_EC
help
Say N to disable Embedded Controller /sys/kernel/debug interface
@@ -385,6 +394,7 @@ config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
config ACPI_DEBUG
bool "Debug Statements"
help
The ACPI subsystem can produce debug output. Saying Y enables this
output and increases the kernel size by around 50K.
@@ -433,7 +443,7 @@ config ACPI_HOTPLUG_IOAPIC
config ACPI_SBS
tristate "Smart Battery System"
- depends on X86
+ depends on X86 && ACPI_EC
select POWER_SUPPLY
help
This driver supports the Smart Battery System, another
@@ -443,7 +453,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
- tristate "Hardware Error Device"
+ bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via
@@ -451,7 +461,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64)
+ depends on EFI
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
@@ -537,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+if RISCV
+source "drivers/acpi/riscv/Kconfig"
+endif
+
config ACPI_PPTT
bool
@@ -567,6 +581,9 @@ config ACPI_FFH
Enable this feature if you want to set up and install the FFH Address
Space handler to handle FFH OpRegion in the firmware.
+config ACPI_MRRM
+ bool
+
source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 39ea5cfa8326..d1b0affb844f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -5,6 +5,10 @@
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING
+endif
+
#
# ACPI Boot-Time Table Parsing
#
@@ -41,7 +45,7 @@ acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
-acpi-y += ec.o
+acpi-$(CONFIG_ACPI_EC) += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
@@ -62,6 +66,7 @@ acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
acpi-$(CONFIG_ACPI_FFH) += acpi_ffh.o
+acpi-$(CONFIG_ACPI_MRRM) += acpi_mrrm.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
@@ -77,6 +82,7 @@ obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
fan-objs := fan_core.o
fan-objs += fan_attr.o
+fan-$(CONFIG_HWMON) += fan_hwmon.o
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2d4a35e6dd18..1f69be8f51a2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -112,7 +112,7 @@ static int get_ac_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property ac_props[] = {
+static const enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
@@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
dev_name(&adev->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(adev, event, (u32) ac->state);
- kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(ac->charger);
}
}
@@ -213,8 +213,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
return -ENOMEM;
ac->device = adev;
- strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
- strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+ strscpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+ strscpy(acpi_device_class(adev), ACPI_AC_CLASS);
platform_set_drvdata(pdev, ac);
@@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev)
if (acpi_ac_get_state(ac))
return 0;
if (old_state != ac->state)
- kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(ac->charger);
return 0;
}
@@ -290,7 +290,7 @@ static void acpi_ac_remove(struct platform_device *pdev)
static struct platform_driver acpi_ac_driver = {
.probe = acpi_ac_probe,
- .remove_new = acpi_ac_remove,
+ .remove = acpi_ac_remove,
.driver = {
.name = "ac",
.acpi_match_table = ac_device_ids,
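
Two of the conversions in this file generalize: the two-argument strscpy() form infers the bound from the destination array and always NUL-terminates, and power_supply_changed() replaces the raw KOBJ_CHANGE uevent so the power-supply core can also update supply LEDs and notify dependent supplies. A hedged sketch of the strscpy() form (the struct is hypothetical):

#include <linux/string.h>

struct demo_dev {
	char name[16];
};

static void demo_set_name(struct demo_dev *d, const char *src)
{
	/* bound taken from sizeof(d->name) at compile time; truncates
	 * rather than overflowing, unlike strcpy()
	 */
	strscpy(d->name, src);
}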
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 80f945cbec8a..49539f7528c6 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -86,7 +86,7 @@ static int fch_misc_setup(struct apd_private_data *pdata)
if (!clk_data->name)
return -ENOMEM;
- strcpy(clk_data->name, obj->string.pointer);
+ strscpy(clk_data->name, obj->string.pointer, obj->string.length);
} else {
/* Set default name to mclk if entry missing in firmware */
clk_data->name = "mclk";
@@ -118,6 +118,11 @@ static const struct apd_device_desc wt_i2c_desc = {
.fixed_clk_rate = 150000000,
};
+static const struct apd_device_desc wt_i3c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 125000000,
+};
+
static struct property_entry uart_properties[] = {
PROPERTY_ENTRY_U32("reg-io-width", 4),
PROPERTY_ENTRY_U32("reg-shift", 2),
@@ -231,6 +236,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "AMD0030", },
{ "AMD0040", APD_ADDR(fch_misc_desc)},
{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
+ { "AMDI0015", APD_ADDR(wt_i3c_desc) },
{ "AMDI0019", APD_ADDR(wt_i2c_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0022", APD_ADDR(cz_uart_desc) },
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index d50261d05f3a..515b20d0b698 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
return 0;
}
-static int acpi_aml_read_user(char __user *buf, int len)
+static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
- n = min(len, circ_count_to_end(crc));
+ n = min_t(size_t, len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
@@ -599,8 +599,8 @@ out:
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
@@ -639,11 +639,11 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_write_user(const char __user *buf, int len)
+static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
- n = min(len, circ_space_to_end(crc));
+ n = min_t(size_t, len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
- return n;
+ return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
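
Besides the int to ssize_t/size_t widening, the hunk above fixes a real bug: acpi_aml_write_user() used to end with `return n`, so a failed copy_from_user() (ret == -EFAULT) was masked by returning the byte count anyway. The shape of the fix, reduced to a sketch (hypothetical helper):

#include <linux/uaccess.h>

static ssize_t demo_copy_in(char *dst, const char __user *src, size_t n)
{
	if (copy_from_user(dst, src, n))
		return -EFAULT;	/* previously the error was dropped and n returned */
	return n;
}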
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index ca87a0939135..f6b9562779de 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -15,6 +15,7 @@
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "apei/apei-internal.h"
#include <ras/ras_event.h>
@@ -234,7 +235,7 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
@@ -251,6 +252,10 @@ static int __init extlog_init(void)
}
extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
+ if (!extlog_l1_hdr) {
+ rc = -ENOMEM;
+ goto err_release_l1_hdr;
+ }
l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
l1_size = l1_head->total_len;
l1_percpu_entry = l1_head->entries;
@@ -268,6 +273,10 @@ static int __init extlog_init(void)
goto err;
}
extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
+ if (!extlog_l1_addr) {
+ rc = -ENOMEM;
+ goto err_release_l1_dir;
+ }
l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
/* remap elog table */
@@ -279,6 +288,10 @@ static int __init extlog_init(void)
goto err_release_l1_dir;
}
elog_addr = acpi_os_map_iomem(elog_base, elog_size);
+ if (!elog_addr) {
+ rc = -ENOMEM;
+ goto err_release_elog;
+ }
rc = -ENOMEM;
/* allocate buffer to save elog record */
@@ -300,6 +313,8 @@ err_release_l1_dir:
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
release_mem_region(l1_dirbase, l1_size);
+err_release_l1_hdr:
+ release_mem_region(l1_dirbase, l1_hdr_size);
err:
pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
return rc;
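
The three new NULL checks follow the usual goto-unwind idiom: each acpi_os_map_iomem() gains an error branch that jumps to the label releasing everything acquired before it. Reduced to two steps, with hypothetical names:

#include <linux/acpi.h>

static int demo_map_pair(u64 pa, u32 la, u64 pb, u32 lb,
			 void __iomem **a, void __iomem **b)
{
	*a = acpi_os_map_iomem(pa, la);
	if (!*a)
		return -ENOMEM;

	*b = acpi_os_map_iomem(pb, lb);
	if (!*b) {
		acpi_os_unmap_iomem(*a, la);	/* undo the earlier step */
		return -ENOMEM;
	}
	return 0;
}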
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 794962c5c88e..b8d98b1b48ae 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
return 0;
}
- err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
if (!err) {
u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
residency_info_ffh.gaddr. bit_width - 1,
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
new file mode 100644
index 000000000000..6d69554c940e
--- /dev/null
+++ b/drivers/acpi/acpi_mrrm.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, Intel Corporation.
+ *
+ * Memory Range and Region Mapping (MRRM) structure
+ *
+ * Parse and report the platform's MRRM table in /sys.
+ */
+
+#define pr_fmt(fmt) "acpi/mrrm: " fmt
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+/* Default: assume one memory region covering all system memory, per the spec */
+static int max_mem_region = 1;
+
+/* Access for use by resctrl file system */
+int acpi_mrrm_max_mem_region(void)
+{
+ return max_mem_region;
+}
+
+struct mrrm_mem_range_entry {
+ u64 base;
+ u64 length;
+ int node;
+ u8 local_region_id;
+ u8 remote_region_id;
+};
+
+static struct mrrm_mem_range_entry *mrrm_mem_range_entry;
+static u32 mrrm_mem_entry_num;
+
+static int get_node_num(struct mrrm_mem_range_entry *e)
+{
+ unsigned int nid;
+
+ for_each_online_node(nid) {
+ for (int z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = NODE_DATA(nid)->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+ if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
+ return zone_to_nid(zone);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static __init int acpi_parse_mrrm(struct acpi_table_header *table)
+{
+ struct acpi_mrrm_mem_range_entry *mre_entry;
+ struct acpi_table_mrrm *mrrm;
+ void *mre, *mrrm_end;
+ int mre_count = 0;
+
+ mrrm = (struct acpi_table_mrrm *)table;
+ if (!mrrm)
+ return -ENODEV;
+
+ if (mrrm->header.revision != 1)
+ return -EINVAL;
+
+ if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
+ return -EOPNOTSUPP;
+
+ mrrm_end = (void *)mrrm + mrrm->header.length - 1;
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ mre_entry = mre;
+ mre_count++;
+ mre += mre_entry->header.length;
+ }
+ if (!mre_count) {
+ pr_info(FW_BUG "No ranges listed in MRRM table\n");
+ return -EINVAL;
+ }
+
+ mrrm_mem_range_entry = kmalloc_array(mre_count, sizeof(*mrrm_mem_range_entry),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!mrrm_mem_range_entry)
+ return -ENOMEM;
+
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ struct mrrm_mem_range_entry *e;
+
+ mre_entry = mre;
+ e = mrrm_mem_range_entry + mrrm_mem_entry_num;
+
+ e->base = mre_entry->addr_base;
+ e->length = mre_entry->addr_len;
+ e->node = get_node_num(e);
+
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL)
+ e->local_region_id = mre_entry->local_region_id;
+ else
+ e->local_region_id = -1;
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE)
+ e->remote_region_id = mre_entry->remote_region_id;
+ else
+ e->remote_region_id = -1;
+
+ mrrm_mem_entry_num++;
+ mre += mre_entry->header.length;
+ }
+
+ max_mem_region = mrrm->max_mem_region;
+
+ return 0;
+}
+
+#define RANGE_ATTR(name, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ struct mrrm_mem_range_entry *mre; \
+ const char *kname = kobject_name(kobj); \
+ int n, ret; \
+ \
+ ret = kstrtoint(kname + 5, 10, &n); \
+ if (ret) \
+ return ret; \
+ \
+ mre = mrrm_mem_range_entry + n; \
+ \
+ return sysfs_emit(buf, fmt, mre->name); \
+} \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+RANGE_ATTR(base, "0x%llx\n");
+RANGE_ATTR(length, "0x%llx\n");
+RANGE_ATTR(node, "%d\n");
+RANGE_ATTR(local_region_id, "%d\n");
+RANGE_ATTR(remote_region_id, "%d\n");
+
+static struct attribute *memory_range_attrs[] = {
+ &base_attr.attr,
+ &length_attr.attr,
+ &node_attr.attr,
+ &local_region_id_attr.attr,
+ &remote_region_id_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(memory_range);
+
+static __init int add_boot_memory_ranges(void)
+{
+ struct kobject *pkobj, *kobj, **kobjs;
+ int ret = -EINVAL;
+ char name[16];
+ int i;
+
+ pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+ if (!pkobj)
+ return -ENOMEM;
+
+ kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+ if (!kobjs) {
+ kobject_put(pkobj);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mrrm_mem_entry_num; i++) {
+ scnprintf(name, sizeof(name), "range%d", i);
+ kobj = kobject_create_and_add(name, pkobj);
+ if (!kobj) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = sysfs_create_groups(kobj, memory_range_groups);
+ if (ret) {
+ kobject_put(kobj);
+ goto cleanup;
+ }
+ kobjs[i] = kobj;
+ }
+
+ kfree(kobjs);
+ return 0;
+
+cleanup:
+ for (int j = 0; j < i; j++) {
+ if (kobjs[j]) {
+ sysfs_remove_groups(kobjs[j], memory_range_groups);
+ kobject_put(kobjs[j]);
+ }
+ }
+ kfree(kobjs);
+ kobject_put(pkobj);
+ return ret;
+}
+
+static __init int mrrm_init(void)
+{
+ int ret;
+
+ ret = acpi_table_parse(ACPI_SIG_MRRM, acpi_parse_mrrm);
+ if (ret < 0)
+ return ret;
+
+ return add_boot_memory_ranges();
+}
+device_initcall(mrrm_init);
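
For reference, the RANGE_ATTR() macro above expands, for the base attribute, to roughly the following (a sketch of the expansion, not extra code in the patch). The range index is recovered from the kobject name, so kname + 5 skips the "range" prefix of "rangeN":

static ssize_t base_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct mrrm_mem_range_entry *mre;
        const char *kname = kobject_name(kobj);
        int n, ret;

        /* Parse N out of the "rangeN" kobject name */
        ret = kstrtoint(kname + 5, 10, &n);
        if (ret)
                return ret;

        mre = mrrm_mem_range_entry + n;

        return sysfs_emit(buf, "0x%llx\n", mre->base);
}
static struct kobj_attribute base_attr = __ATTR_RO(base);

Since the parent kobject is created under acpi_kobj, the attributes land at /sys/firmware/acpi/memory_ranges/rangeN/{base,length,node,local_region_id,remote_region_id}.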
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index bd1ad07f0290..c9a0bcaba2e4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,16 +19,21 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/cpuid/api.h>
#include <asm/mwait.h>
#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+
+#define ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS 0
+#define ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION 1
+
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
@@ -42,10 +47,8 @@ static void power_saving_mwait_init(void)
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
@@ -132,8 +135,10 @@ static void exit_round_robin(unsigned int tsk_index)
{
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
- cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
- tsk_in_cpu[tsk_index] = -1;
+ if (tsk_in_cpu[tsk_index] != -1) {
+ cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+ tsk_in_cpu[tsk_index] = -1;
+ }
}
static unsigned int idle_pct = 5; /* percentage */
@@ -382,16 +387,23 @@ static void acpi_pad_handle_notify(acpi_handle handle)
.length = 4,
.pointer = (void *)&idle_cpus,
};
+ u32 status;
mutex_lock(&isolated_cpus_lock);
num_cpus = acpi_pad_pur(handle);
if (num_cpus < 0) {
- mutex_unlock(&isolated_cpus_lock);
- return;
+ /*
+ * The ACPI specification says that if no action was performed when
+ * processing the _PUR object, _OST should still be evaluated, albeit
+ * with a different status code.
+ */
+ status = ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION;
+ } else {
+ status = ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS;
+ acpi_pad_idle_cpus(num_cpus);
}
- acpi_pad_idle_cpus(num_cpus);
+
idle_cpus = acpi_pad_idle_cpus_num();
- acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, status, &param);
mutex_unlock(&isolated_cpus_lock);
}
@@ -417,8 +429,8 @@ static int acpi_pad_probe(struct platform_device *pdev)
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
acpi_status status;
- strcpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
- strcpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS);
+ strscpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
+ strscpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS);
status = acpi_install_notify_handler(adev->handle,
ACPI_DEVICE_NOTIFY, acpi_pad_notify, adev);
@@ -449,7 +461,7 @@ MODULE_DEVICE_TABLE(acpi, pad_device_ids);
static struct platform_driver acpi_pad_driver = {
.probe = acpi_pad_probe,
- .remove_new = acpi_pad_remove,
+ .remove = acpi_pad_remove,
.driver = {
.dev_groups = acpi_pad_groups,
.name = "processor_aggregator",
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 07a034a53aca..97064e943768 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -31,7 +31,6 @@
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
- void __iomem *pcc_comm_addr;
struct completion done;
struct mbox_client cl;
struct acpi_pcc_info ctx;
@@ -81,14 +80,6 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
ret = AE_SUPPORT;
goto err_free_channel;
}
- data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!data->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- ctx->subspace_id);
- ret = AE_NO_MEMORY;
- goto err_free_channel;
- }
*region_context = data;
return AE_OK;
@@ -113,7 +104,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
reinit_completion(&data->done);
/* Write to Shared Memory */
- memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+ memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);
ret = mbox_send_message(data->pcc_chan->mchan, NULL);
if (ret < 0)
@@ -134,7 +125,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
mbox_chan_txdone(data->pcc_chan->mchan, ret);
- memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+ memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);
return AE_OK;
}
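
The deleted ioremap is redundant on the assumption, which the hunks above rely on, that the PCC mailbox core already maps the subspace's shared memory when the channel is requested and publishes it as pcc_chan->shmem. A minimal sketch of the handler's data path after the change:

        /* Write the request into the comm region mapped by the mailbox core */
        memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);
        ret = mbox_send_message(data->pcc_chan->mchan, NULL);
        /* ... wait for completion, then read the platform's response ... */
        memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);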
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 01abf26764b0..4ad88187dc7a 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -120,8 +120,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"IBM0071"},
/* smsc-ircc2 */
{"SMCf010"},
- /* sb1000 */
- {"GIC1000"},
/* parport_pc */
{"PNP0400"}, /* Standard LPT Printer Port */
{"PNP0401"}, /* ECP Printer Port */
@@ -355,8 +353,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
* device represented by it.
*/
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INT3F0D"},
{"INTC1080"},
{"INTC1081"},
+ {"INTC1099"},
{""},
};
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 7a0dd35d62c9..7ec1dc04fd11 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -35,6 +35,17 @@ EXPORT_PER_CPU_SYMBOL(processors);
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);
+acpi_handle acpi_get_processor_handle(int cpu)
+{
+ struct acpi_processor *pr;
+
+ pr = per_cpu(processors, cpu);
+ if (pr)
+ return pr->handle;
+
+ return NULL;
+}
+
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
u8 value1 = 0;
@@ -183,20 +194,44 @@ static void __init acpi_pcc_cpufreq_init(void) {}
#endif /* CONFIG_X86 */
/* Initialization */
+static DEFINE_PER_CPU(void *, processor_device_array);
+
+static int acpi_processor_set_per_cpu(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check.
+ * ACPI id of processors can be reported wrongly by the BIOS.
+ * Don't trust it blindly.
+ */
+ if (per_cpu(processor_device_array, pr->id) != NULL &&
+ per_cpu(processor_device_array, pr->id) != device) {
+ dev_warn(&device->dev,
+ "BIOS reported wrong ACPI id %d for the processor\n",
+ pr->id);
+ return -EINVAL;
+ }
+ /*
+ * processor_device_array is not cleared on errors to allow buggy BIOS
+ * checks.
+ */
+ per_cpu(processor_device_array, pr->id) = device;
+ per_cpu(processors, pr->id) = pr;
+
+ return 0;
+}
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static int acpi_processor_hotadd_init(struct acpi_processor *pr)
+static int acpi_processor_hotadd_init(struct acpi_processor *pr,
+ struct acpi_device *device)
{
- unsigned long long sta;
- acpi_status status;
int ret;
if (invalid_phys_cpuid(pr->phys_id))
return -ENODEV;
- status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
- return -ENODEV;
-
cpu_maps_update_begin();
cpus_write_lock();
@@ -204,19 +239,26 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
if (ret)
goto out;
+ ret = acpi_processor_set_per_cpu(pr, device);
+ if (ret) {
+ acpi_unmap_cpu(pr->id);
+ goto out;
+ }
+
ret = arch_register_cpu(pr->id);
if (ret) {
+ /* Leave the processor device array in place to detect a buggy BIOS */
+ per_cpu(processors, pr->id) = NULL;
acpi_unmap_cpu(pr->id);
goto out;
}
/*
- * CPU got hot-added, but cpu_data is not initialized yet. Set a flag
- * to delay cpu_idle/throttling initialization and do it when the CPU
- * gets online for the first time.
+ * CPU got hot-added, but cpu_data is not initialized yet. Do
+ * cpu_idle/throttling initialization when the CPU gets online for
+ * the first time.
*/
pr_info("CPU%d has been hot-added\n", pr->id);
- pr->flags.need_hotplug_init = 1;
out:
cpus_write_unlock();
@@ -224,7 +266,8 @@ out:
return ret;
}
#else
-static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
+static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
+ struct acpi_device *device)
{
return -ENODEV;
}
@@ -232,13 +275,14 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
static int acpi_processor_get_info(struct acpi_device *device)
{
- union acpi_object object = { 0 };
+ union acpi_object object = { .processor = { 0 } };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
int device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
+ int ret;
acpi_processor_errata();
@@ -315,19 +359,19 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
/*
- * Extra Processor objects may be enumerated on MP systems with
- * less than the max # of CPUs. They should be ignored _iff
- * they are physically not present.
- *
- * NOTE: Even if the processor has a cpuid, it may not be present
- * because cpuid <-> apicid mapping is persistent now.
+ * This code is not called unless we know the CPU is present and
+ * enabled. The two paths are:
+ * a) Initially present CPUs on architectures that do not defer
+ * their arch_register_cpu() calls until this point.
+ * b) Hotplugged CPUs (enabled bit in _STA has transitioned from not
+ * enabled to enabled)
*/
- if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
- int ret = acpi_processor_hotadd_init(pr);
-
- if (ret)
- return ret;
- }
+ if (!get_cpu_device(pr->id))
+ ret = acpi_processor_hotadd_init(pr, device);
+ else
+ ret = acpi_processor_set_per_cpu(pr, device);
+ if (ret)
+ return ret;
/*
* On some boxes several processors use the same processor bus id.
@@ -372,8 +416,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
* (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
* Such things have to be put in and set up by the processor driver's .probe().
*/
-static DEFINE_PER_CPU(void *, processor_device_array);
-
static int acpi_processor_add(struct acpi_device *device,
const struct acpi_device_id *id)
{
@@ -394,45 +436,23 @@ static int acpi_processor_add(struct acpi_device *device,
}
pr->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
result = acpi_processor_get_info(device);
if (result) /* Processor is not physically present or unavailable */
- return 0;
-
- BUG_ON(pr->id >= nr_cpu_ids);
-
- /*
- * Buggy BIOS check.
- * ACPI id of processors can be reported wrongly by the BIOS.
- * Don't trust it blindly
- */
- if (per_cpu(processor_device_array, pr->id) != NULL &&
- per_cpu(processor_device_array, pr->id) != device) {
- dev_warn(&device->dev,
- "BIOS reported wrong ACPI id %d for the processor\n",
- pr->id);
- /* Give up, but do not abort the namespace scan. */
- goto err;
- }
- /*
- * processor_device_array is not cleared on errors to allow buggy BIOS
- * checks.
- */
- per_cpu(processor_device_array, pr->id) = device;
- per_cpu(processors, pr->id) = pr;
+ goto err_clear_driver_data;
dev = get_cpu_device(pr->id);
if (!dev) {
result = -ENODEV;
- goto err;
+ goto err_clear_per_cpu;
}
result = acpi_bind_one(dev, device);
if (result)
- goto err;
+ goto err_clear_per_cpu;
pr->dev = dev;
@@ -443,10 +463,11 @@ static int acpi_processor_add(struct acpi_device *device,
dev_err(dev, "Processor driver could not be attached\n");
acpi_unbind_one(dev);
- err:
- free_cpumask_var(pr->throttling.shared_cpu_map);
- device->driver_data = NULL;
+ err_clear_per_cpu:
per_cpu(processors, pr->id) = NULL;
+ err_clear_driver_data:
+ device->driver_data = NULL;
+ free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
kfree(pr);
return result;
@@ -454,7 +475,7 @@ static int acpi_processor_add(struct acpi_device *device,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
-static void acpi_processor_remove(struct acpi_device *device)
+static void acpi_processor_post_eject(struct acpi_device *device)
{
struct acpi_processor *pr;
@@ -476,10 +497,6 @@ static void acpi_processor_remove(struct acpi_device *device)
device_release_driver(pr->dev);
acpi_unbind_one(pr->dev);
- /* Clean up. */
- per_cpu(processor_device_array, pr->id) = NULL;
- per_cpu(processors, pr->id) = NULL;
-
cpu_maps_update_begin();
cpus_write_lock();
@@ -487,6 +504,10 @@ static void acpi_processor_remove(struct acpi_device *device)
arch_unregister_cpu(pr->id);
acpi_unmap_cpu(pr->id);
+ /* Clean up. */
+ per_cpu(processor_device_array, pr->id) = NULL;
+ per_cpu(processors, pr->id) = NULL;
+
cpus_write_unlock();
cpu_maps_update_done();
@@ -598,9 +619,9 @@ static bool __init acpi_early_processor_osc(void)
void __init acpi_early_processor_control_setup(void)
{
if (acpi_early_processor_osc()) {
- pr_info("_OSC evaluated successfully for all CPUs\n");
+ pr_debug("_OSC evaluated successfully for all CPUs\n");
} else {
- pr_info("_OSC evaluation for CPUs failed, trying _PDC\n");
+ pr_debug("_OSC evaluation for CPUs failed, trying _PDC\n");
acpi_early_processor_set_pdc();
}
}
@@ -622,7 +643,7 @@ static struct acpi_scan_handler processor_handler = {
.ids = processor_device_ids,
.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
- .detach = acpi_processor_remove,
+ .post_eject = acpi_processor_post_eject,
#endif
.hotplug = {
.enabled = true,
@@ -794,7 +815,7 @@ bool acpi_processor_claim_cst_control(void)
cst_control_claimed = true;
return true;
}
-EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
@@ -964,7 +985,7 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
memcpy(&info->states[++last_index], &cx, sizeof(cx));
}
- acpi_handle_info(handle, "Found %d idle states\n", last_index);
+ acpi_handle_debug(handle, "Found %d idle states\n", last_index);
info->count = last_index;
@@ -973,5 +994,5 @@ end:
return ret;
}
-EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
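
The net effect of the refactor is that the per-CPU bookkeeping is shared between the boot and hotplug paths; a condensed sketch of the dispatch in acpi_processor_get_info() after the hunks above (error handling elided):

        if (!get_cpu_device(pr->id))
                /* CPU not registered yet: map it, record the per-CPU state,
                 * then arch_register_cpu() -- the hotplug path */
                ret = acpi_processor_hotadd_init(pr, device);
        else
                /* Already-present CPU: just record the per-CPU state */
                ret = acpi_processor_set_per_cpu(pr, device);
        if (ret)
                return ret;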
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 1d670dbe4d1d..6d870d97ada6 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -27,6 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+MODULE_DESCRIPTION("ACPI Time and Alarm (TAD) Device Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rafael J. Wysocki");
@@ -89,19 +90,18 @@ static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt)
args[0].buffer.pointer = (u8 *)rt;
args[0].buffer.length = sizeof(*rt);
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
return 0;
}
-static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+static int acpi_tad_evaluate_grt(struct device *dev, struct acpi_tad_rt *rt)
{
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER };
@@ -110,12 +110,7 @@ static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
acpi_status status;
int ret = -EIO;
- pm_runtime_get_sync(dev);
-
status = acpi_evaluate_object(handle, "_GRT", NULL, &output);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
goto out_free;
@@ -138,6 +133,21 @@ out_free:
return ret;
}
+static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+{
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
+
+ return acpi_tad_evaluate_grt(dev, rt);
+}
+
static char *acpi_tad_rt_next_field(char *s, int *val)
{
char *p;
@@ -232,7 +242,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
+ return sysfs_emit(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
rt.tz, rt.daylight);
}
@@ -265,12 +275,11 @@ static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id,
args[0].integer.value = timer_id;
args[1].integer.value = value;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
@@ -313,12 +322,11 @@ static ssize_t acpi_tad_wake_read(struct device *dev, char *buf, char *method,
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
return -EIO;
@@ -369,12 +377,11 @@ static int acpi_tad_clear_status(struct device *dev, u32 timer_id)
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_CWS", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
@@ -410,12 +417,11 @@ static ssize_t acpi_tad_status_read(struct device *dev, char *buf, u32 timer_id)
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_GWS", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
return -EIO;
@@ -427,7 +433,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
{
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
- return sprintf(buf, "0x%02X\n", dd->capabilities);
+ return sysfs_emit(buf, "0x%02X\n", dd->capabilities);
}
static DEVICE_ATTR_RO(caps);
@@ -562,21 +568,24 @@ static void acpi_tad_remove(struct platform_device *pdev)
device_init_wakeup(dev, false);
- pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT)
+ sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group);
- acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
- acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
- if (dd->capabilities & ACPI_TAD_DC_WAKE) {
- acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
- acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
+ scoped_guard(pm_runtime_noresume, dev) {
+ acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
+ acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
+ if (dd->capabilities & ACPI_TAD_DC_WAKE) {
+ acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
+ acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
+ }
}
- pm_runtime_put_sync(dev);
+ pm_runtime_suspend(dev);
pm_runtime_disable(dev);
acpi_remove_cmos_rtc_space_handler(handle);
}
@@ -683,7 +692,7 @@ static struct platform_driver acpi_tad_driver = {
.acpi_match_table = acpi_tad_ids,
},
.probe = acpi_tad_probe,
- .remove_new = acpi_tad_remove,
+ .remove = acpi_tad_remove,
};
MODULE_DEVICE_TABLE(acpi, acpi_tad_ids);
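
Every _SRT/_GWS/_CWS-style helper in this file now uses the same scope-based runtime-PM guard; a standalone sketch of the pattern (eval_tad_method and its arguments are hypothetical, the two PM_RUNTIME_* macros are used exactly as in the hunks above):

static int eval_tad_method(struct device *dev, char *method,
                           struct acpi_object_list *args)
{
        unsigned long long retval;
        acpi_status status;

        /* Resume the device; the reference is dropped when pm leaves scope */
        PM_RUNTIME_ACQUIRE(dev, pm);
        if (PM_RUNTIME_ACQUIRE_ERR(&pm))
                return -ENXIO;

        status = acpi_evaluate_integer(ACPI_HANDLE(dev), method, args, &retval);
        if (ACPI_FAILURE(status) || retval)
                return -EIO;

        return 0;
}

Unlike the old pm_runtime_get_sync()/pm_runtime_put_sync() pairs, the guard cannot leak a reference on an early return.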
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 1fda30388297..be8e7e18abca 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <acpi/video.h>
#include <linux/uaccess.h>
+#include <linux/string_choices.h>
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
@@ -610,16 +611,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return: the length of the EDID (positive value) on success or a negative
+ * error code on failure.
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +649,24 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ /*
+ * Some buggy implementations incorrectly return the EDID buffer in an ACPI package.
+ * In this case, extract the buffer from the package.
+ */
+ if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1)
+ obj = &obj->package.elements[0];
+
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(buffer.pointer);
+ return ret;
}
/* bus */
@@ -1128,8 +1149,8 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
return -ENOMEM;
}
- strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ strscpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
data->device_id = device_id;
data->video = video;
@@ -1435,9 +1456,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1496,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
@@ -1946,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
- list_for_each_entry(dev, &video->video_device_list, entry)
+ list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
+ cancel_delayed_work_sync(&dev->switch_brightness_work);
+ }
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);
@@ -2010,8 +2025,8 @@ static int acpi_video_bus_add(struct acpi_device *device)
}
video->device = device;
- strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
- strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
+ strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = video;
acpi_video_bus_find_cap(video);
@@ -2034,9 +2049,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
- video->flags.multihead ? "yes" : "no",
- video->flags.rom ? "yes" : "no",
- video->flags.post ? "yes" : "no");
+ str_yes_no(video->flags.multihead),
+ str_yes_no(video->flags.rom),
+ str_yes_no(video->flags.post));
mutex_lock(&video_list_lock);
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
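
With the reworked acpi_video_device_EDID(), the caller no longer keeps the raw ACPI buffer alive; a sketch of consuming the new interface, per the prototype above (the type and device_id values here are illustrative):

        void *edid;
        int len;

        len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
        if (len > 0) {
                /* len bytes of EDID, kmemdup()'d for us; we own the buffer */
                kfree(edid);
        }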
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 9d4cbd956627..d7d4649ce66f 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2022 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2025 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 4536dc9d3979..662231f4f881 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index c6ba6a36cfb5..24998f2d7539 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 911875c5a5f1..91241bd6917a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 73eecbf62f06..5d48a344b35f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ddd072cbc738..b40fb3a5ac8a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -188,7 +188,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
acpi_adr_space_type space_id, u32 function);
acpi_status
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f4c90fc99be2..c8a750d2674c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -29,11 +29,7 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
-
-#if (!ACPI_REDUCED_HARDWARE)
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
-
-#endif /* !ACPI_REDUCED_HARDWARE */
+ACPI_INIT_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS, NULL);
/* These addresses are calculated from the FADT Event Block addresses */
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 79bbfe00d241..6aec56c65fa0 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_status acpi_hw_enable_all_runtime_gpes(void);
-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-
u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 955114c926bd..1ee6ac9b2baf 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -120,6 +120,9 @@ void
acpi_ex_trace_point(acpi_trace_event_type type,
u8 begin, u8 *aml, char *pathname);
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count);
+
/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 02012168a087..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
@@ -1090,6 +1090,8 @@ struct acpi_port_info {
#define ACPI_ADDRESS_TYPE_IO_RANGE 1
#define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE 2
+#define ACPI_ADDRESS_TYPE_PCC_NUMBER 0xA
+
/* Resource descriptor types and masks */
#define ACPI_RESOURCE_NAME_LARGE 0x80
@@ -1139,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index de83dd22292b..4e9402c02410 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9448bc026b9b..13f050fecb49 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8fc02946d3cd..6ffcc7a0a0c2 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index da96d80e6b3a..a2a9e51d7ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6dad786a382c..65a15dee092b 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 2e442f5a3123..da2c45880cc7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -450,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_PACKAGE),
+ ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index d772ff9ca07d..e8a92be5adae 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f8fee94ba708..e690f604cfa0 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index b6ae979b01b6..ebef72bf58d0 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index edfdbbef81c1..3990d509bbab 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index effe52b40dce..c5b544a006c5 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 4e88f9fc2a28..54d6e51e0b9a 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -504,10 +504,6 @@ struct aml_resource_pin_group_config {
#define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */
-/* restore default alignment */
-
-#pragma pack()
-
/* Union of all resource descriptors, so we can allocate the worst case */
union aml_resource {
@@ -562,6 +558,10 @@ union aml_resource {
u8 byte_item;
};
+/* restore default alignment */
+
+#pragma pack()
+
/* Interfaces used by both the disassembler and compiler */
void
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 2b84ac093698..8dbab6932049 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
elements =
ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS *
sizeof(union acpi_object));
+ if (!elements)
+ return (AE_NO_MEMORY);
this = string;
for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) {
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index e874c1dddefa..554ae35108bd 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 4354c175e12e..e2f00c54cb36 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 80c69af06948..c1f79d7a2026 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index c5c8380a3114..274b74255551 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 532401ecdab0..df132c9089c7 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 6e0e362e461f..57cd9e2d1109 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index e809c2aed78a..45ec32e81903 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_evaluate_info *info;
- u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
@@ -483,6 +482,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
+ if (this_walk_state->num_operands < obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS);
+ } else if (this_walk_state->num_operands > obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS);
+ }
+
/* Init for new method, possibly wait on method mutex */
status =
@@ -539,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < obj_desc->method.param_count; i++) {
- acpi_ut_remove_reference(this_walk_state->operands[i]);
- this_walk_state->operands[i] = NULL;
- }
-
- /* Clear the operand stack */
-
- this_walk_state->num_operands = 0;
+ acpi_ds_clear_operands(this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index eca50517ad82..5393de4dbc4c 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -188,6 +188,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
+ acpi_ex_trace_args(params, index);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 555f148d666b..1bf7eec49899 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dd3059000885..5699b0872848 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index ecf793fe9919..1ed2386fab82 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index fb9ed5e1da89..baf6a1f27605 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
u32 arg_count = 0;
u32 index = walk_state->num_operands;
+ u32 prev_num_operands = walk_state->num_operands;
+ u32 new_num_operands;
u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
/* Create the interpreter arguments, in reverse order */
+ new_num_operands = index;
index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
@@ -720,7 +723,11 @@ cleanup:
* pop everything off of the operand stack and delete those
* objects
*/
- acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+ walk_state->num_operands = (u8)(i);
+ acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);
+
+ /* Restore operand count */
+ walk_state->num_operands = (u8)(prev_num_operands);
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index a43336f05206..5c5c6d8a4e48 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index f7b8496c8bdd..666419b6a5c6 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 541235f498c2..bfc54c914757 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 1fdd07ae862c..375a8fa43d9d 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 75338a13c802..02aaddb89df9 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e78c5b9ad52..6cdd39c987b8 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 989dc01af03f..df2a4ab0e0da 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
return_ACPI_STATUS(AE_OK);
}
+ if (!acpi_gbl_use_global_lock) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 934b201d3820..ba65b2ea49b2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 58e1890ab25b..fadd93caf1d5 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 38f408cf13ce..eb769739420e 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index ee3b1ea656d4..d15b1d75c8ec 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 1c8cb6d924df..5a35dae945e2 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index e68e876d3b84..04a23a6c3bb1 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 18fdf2bc2d49..fa3475da7ea9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -65,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_gbl_default_address_spaces
[i])) {
acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+ ACPI_UINT32_MAX,
acpi_gbl_default_address_spaces
[i], ACPI_REG_CONNECT);
}
@@ -672,6 +673,7 @@ cleanup1:
* FUNCTION: acpi_ev_execute_reg_methods
*
* PARAMETERS: node - Namespace node for the device
+ * max_depth - Depth to which to search for _REG methods
* space_id - The address space ID
* function - Passed to _REG: On (1) or Off (0)
*
@@ -683,7 +685,7 @@ cleanup1:
******************************************************************************/
void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
acpi_adr_space_type space_id, u32 function)
{
struct acpi_reg_walk_info info;
@@ -717,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
* regions and _REG methods. (i.e. handlers must be installed for all
* regions of this Space ID before we can run any _REG methods)
*/
- (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
&info, NULL);
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 46d1b3f5582d..b03952798af5 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 24fa6433d562..86a8d41c079c 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 48bf845191d2..4b052908d2e7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 4eeeb3b7ab7e..60dacec1b121 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 3197e6303c5b..bccc672c934c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device,
/* Run all _REG methods for this address space */
if (run_reg) {
- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+ acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
+ ACPI_REG_CONNECT);
}
unlock_and_exit:
@@ -231,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */
- acpi_os_release_mutex(handler_obj->address_space.
- context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}
@@ -263,6 +262,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
* FUNCTION: acpi_execute_reg_methods
*
* PARAMETERS: device - Handle for the device
+ * max_depth - Depth to which the namespace is searched for _REG methods
* space_id - The address space ID
*
* RETURN: Status
@@ -271,7 +271,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
*
******************************************************************************/
acpi_status
-acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
+acpi_execute_reg_methods(acpi_handle device, u32 max_depth,
+ acpi_adr_space_type space_id)
{
struct acpi_namespace_node *node;
acpi_status status;
@@ -296,7 +297,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
/* Run all _REG methods for this address space */
- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+ acpi_ev_execute_reg_methods(node, max_depth, space_id,
+ ACPI_REG_CONNECT);
} else {
status = AE_BAD_PARAMETER;
}
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 2fb78b35565b..c248c9b162fa 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 473115309860..4d7dd0fc6b07 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 3729bf3b74f7..fded9bfc2436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,8 @@ ACPI_MODULE_NAME("exconvrt")
/* Local prototypes */
static u32
-acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
+acpi_ex_convert_to_ascii(u64 integer,
+ u16 base, u8 *string, u8 max_length, u8 leading_zeros);
/*******************************************************************************
*
@@ -225,8 +226,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the string to the buffer */
new_buf = return_desc->buffer.pointer;
- strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
- obj_desc->string.length);
+ memcpy((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
break;
default:
@@ -249,6 +250,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
* base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
* string - Where the string is returned
* data_width - Size of data item to be converted, in bytes
+ * leading_zeros - Allow leading zeros
*
* RETURN: Actual string length
*
@@ -257,7 +259,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
******************************************************************************/
static u32
-acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
+acpi_ex_convert_to_ascii(u64 integer,
+ u16 base, u8 *string, u8 data_width, u8 leading_zeros)
{
u64 digit;
u32 i;
@@ -266,7 +269,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
u32 hex_length;
u32 decimal_length;
u32 remainder;
- u8 supress_zeros;
+ u8 supress_zeros = !leading_zeros;
+ u8 hex_char;
ACPI_FUNCTION_ENTRY();
@@ -293,7 +297,6 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
break;
}
- supress_zeros = TRUE; /* No leading zeros */
remainder = 0;
for (i = decimal_length; i > 0; i--) {
@@ -328,8 +331,17 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
/* Get one hex digit, most significant digits first */
- string[k] = (u8)
+ hex_char = (u8)
acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j));
+
+ /* Suppress leading zeros until the first non-zero character */
+
+ if (hex_char == ACPI_ASCII_ZERO && supress_zeros) {
+ continue;
+ }
+
+ supress_zeros = FALSE;
+ string[k] = hex_char;
k++;
}
break;
@@ -379,6 +391,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
u32 string_length = 0;
u16 base = 16;
u8 separator = ',';
+ u8 leading_zeros;
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
@@ -400,14 +413,26 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Make room for the maximum decimal number size
*/
string_length = ACPI_MAX_DECIMAL_DIGITS;
+ leading_zeros = FALSE;
base = 10;
break;
+ case ACPI_EXPLICIT_CONVERT_HEX:
+ /*
+ * From to_hex_string.
+ *
+ * Suppress leading zeros and append "0x"
+ */
+ string_length =
+ ACPI_MUL_2(acpi_gbl_integer_byte_width) + 2;
+ leading_zeros = FALSE;
+ break;
default:
/* Two hex string characters for each integer byte */
string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
+ leading_zeros = TRUE;
break;
}
@@ -422,17 +447,32 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
new_buf = return_desc->buffer.pointer;
+ if (type == ACPI_EXPLICIT_CONVERT_HEX) {
+
+ /* Append "0x" prefix for explicit hex conversion */
+
+ *new_buf++ = '0';
+ *new_buf++ = 'x';
+ }
/* Convert integer to string */
string_length =
acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
new_buf,
- acpi_gbl_integer_byte_width);
+ acpi_gbl_integer_byte_width,
+ leading_zeros);
/* Null terminate at the correct place */
return_desc->string.length = string_length;
+ if (type == ACPI_EXPLICIT_CONVERT_HEX) {
+
+ /* Take "0x" prefix into account */
+
+ return_desc->string.length += 2;
+ }
+
new_buf[string_length] = 0;
break;
@@ -448,6 +488,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* From ACPI: "If the input is a buffer, it is converted to a
* a string of decimal values separated by commas."
*/
+ leading_zeros = FALSE;
base = 10;
/*
@@ -475,6 +516,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
*
* Each hex number is prefixed with 0x (11/2018)
*/
+ leading_zeros = TRUE;
separator = ' ';
string_length = (obj_desc->buffer.length * 5);
break;
@@ -488,6 +530,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
*
* Each hex number is prefixed with 0x (11/2018)
*/
+ leading_zeros = TRUE;
separator = ',';
string_length = (obj_desc->buffer.length * 5);
break;
@@ -528,7 +571,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
new_buf += acpi_ex_convert_to_ascii((u64) obj_desc->
buffer.pointer[i],
- base, new_buf, 1);
+ base, new_buf, 1,
+ leading_zeros);
/* Each digit is separated by either a comma or space */
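The leading_zeros flag threaded through acpi_ex_convert_to_ascii controls whether '0' digits before the first significant hex digit are emitted; to_hex_string now requests suppression plus a "0x" prefix. A self-contained sketch of the same suppression idea, outside ACPICA and with illustrative names:

	/* Hex conversion without leading zeros; always emits at least "0". */
	static int to_hex_no_leading_zeros(unsigned long long value, char *out)
	{
		static const char digits[] = "0123456789ABCDEF";
		int started = 0, k = 0;
		int shift;

		for (shift = 60; shift >= 0; shift -= 4) {
			char c = digits[(value >> shift) & 0xF];

			if (c == '0' && !started && shift != 0)
				continue;	/* suppress leading zeros */

			started = 1;
			out[k++] = c;
		}

		out[k] = '\0';
		return k;	/* length, as acpi_ex_convert_to_ascii returns */
	}

	/* to_hex_no_leading_zeros(0x2A, buf) yields "2A",
	 * not "000000000000002A"; an input of 0 still yields "0".
	 */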
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 1bea9d97652c..052c69567997 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 3f86bfada510..81a07a52b73c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2e2da8790224..d8aeebaab70a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 61ff36189ace..ced3ff9d0a86 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index cf6c812a8b6d..0771934c0455 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c6f2a9166ac0..07cbac58ed21 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 65c487facdda..1fa013197fcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 9a448165bfeb..76ab73c37e90 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 20fb34b68bee..6ac7e0ca5c9d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 743c258bf2e8..a94fa4d70e99 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index d3091f619909..bf08110ed6d2 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 1af35e143ba9..cb078e39abf7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 08196fa17080..1b1a006e82de 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
if (info->connection_node) {
second_desc = info->connection_node->object;
+ if (second_desc == NULL) {
+ break;
+ }
if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
status =
acpi_ds_get_buffer_arguments(second_desc);
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8907b8bf4267..a390a1c2b0ab 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
- acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
u32 remainder;
#endif
@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
- /*
- * If mapping the entire remaining portion of the region will cross
- * a page boundary, just map up to the page boundary, do not cross.
- * On some systems, crossing a page boundary while mapping regions
- * can cause warnings if the pages have different attributes
- * due to resource management.
- *
- * This has the added benefit of constraining a single mapping to
- * one page, which is similar to the original code that used a 4k
- * maximum window.
- */
- page_boundary_map_length = (acpi_size)
- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
- if (page_boundary_map_length == 0) {
- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
- }
-
- if (map_length > page_boundary_map_length) {
- map_length = page_boundary_map_length;
- }
+ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
+ map_length = ACPI_DEFAULT_PAGE_SIZE;
/* Create a new mapping starting at the address given */
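Dropping the page-boundary computation means a single mapping window is now capped at ACPI_DEFAULT_PAGE_SIZE bytes but no longer stops at the next boundary. A worked example with illustrative numbers:

	/*
	 * address              = 0x1FF0 (16 bytes below a 4 KiB boundary)
	 * bytes left in region = 0x100
	 *
	 * Old logic: map 0x2000 - 0x1FF0 = 0x10 bytes (stop at the boundary)
	 * New logic: map min(0x100, 0x1000) = 0x100 bytes (may cross it)
	 */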
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 873de01b8ad2..dd83631090fc 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 24a78b5e266c..4589de3f3012 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3a437e6ace5c..782ee353a709 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 5241f4c01c76..6d2581ec22ad 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
function = ACPI_READ;
break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 575c7a39f1aa..cbc42207496d 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b01ae015e1b5..0470b2639831 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 37c3131a82fa..5b168fbc03e8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f665ffd9a396..7f843c9d8a06 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -133,14 +133,15 @@ acpi_status acpi_ex_system_do_stall(u32 how_long_us)
* (ACPI specifies 100 usec as max, but this gives some slack in
* order to support existing BIOSs)
*/
- ACPI_ERROR((AE_INFO,
- "Time parameter is too large (%u)", how_long_us));
+ ACPI_ERROR_ONCE((AE_INFO,
+ "Time parameter is too large (%u)",
+ how_long_us));
status = AE_AML_OPERAND_VALUE;
} else {
if (how_long_us > 100) {
- ACPI_WARNING((AE_INFO,
- "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.",
- how_long_us));
+ ACPI_WARNING_ONCE((AE_INFO,
+ "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.",
+ how_long_us));
}
acpi_os_stall(how_long_us);
}
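ACPI_ERROR_ONCE and ACPI_WARNING_ONCE presumably emit their message a single time rather than on every over-long Stall(), so buggy firmware that stalls in a loop no longer floods the log. A minimal sketch of the usual once-only macro shape such helpers are built on (illustrative, not the actual ACPICA definition):

	#include <stdio.h>

	#define WARN_ONCE_SKETCH(fmt, ...)				\
		do {							\
			static unsigned char warned;			\
			if (!warned) {					\
				warned = 1;				\
				fprintf(stderr, fmt "\n", ##__VA_ARGS__); \
			}						\
		} while (0)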
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index f1730221ff13..36934d4f26fb 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -136,9 +136,9 @@ acpi_ex_trace_point(acpi_trace_event_type type,
if (pathname) {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
- "%s %s [0x%p:%s] execution.\n",
+ "%s %s [%s] execution.\n",
acpi_ex_get_trace_event_name(type),
- begin ? "Begin" : "End", aml, pathname));
+ begin ? "Begin" : "End", pathname));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
"%s %s [0x%p] execution.\n",
@@ -149,6 +149,57 @@ acpi_ex_trace_point(acpi_trace_event_type type,
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_trace_args
+ *
+ * PARAMETERS: params - AML method arguments
+ * count - numer of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace the arguments passed to an AML method
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ex_trace_args);
+
+ for (i = 0; i < count; i++) {
+ union acpi_operand_object *obj_desc = params[i];
+
+ if (!i) {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%llx", obj_desc->integer.value));
+ break;
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->string.length) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ continue;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+ break;
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+ if (i + 1 == count) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ex_start_trace_method
*
* PARAMETERS: method_node - Node of the method
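acpi_ex_trace_args prints the resolved operands on one line: integers in hex, strings via acpi_ut_print_string, anything else as "Unknown", separated by ", " and terminated with a newline. A hypothetical call site and the output it would produce (both illustrative):

	/* After the interpreter has resolved a method's operands: */
	acpi_ex_trace_args(walk_state->params, param_count);

	/* For FOO(0x12, "bar") the trace line would read, roughly:
	 *    12, "bar"
	 */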
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index f4d4a033f166..cc10c0732218 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 790f342dcd25..a1e1fa787566 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index a9ba9190408b..631fd8e2b774 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index e0c847ab8324..386f4759c317 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e0921f08b71a..87d78bef6323 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 192c04b5a599..a5e0bccae6a4 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b8de458f0368..496fd9e49f0b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c31f803995c6..847cd1b2493d 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 36ea48f64110..9aabe30416da 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -16,20 +16,11 @@
ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
-#if (!ACPI_REDUCED_HARDWARE)
static acpi_status
acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
acpi_physical_address physical_address,
acpi_physical_address physical_address64);
-#endif
-
-/*
- * These functions are removed for the ACPI_REDUCED_HARDWARE case:
- * acpi_set_firmware_waking_vector
- * acpi_enter_sleep_state_s4bios
- */
-#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_hw_set_firmware_waking_vector
@@ -115,6 +106,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address,
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+/*
+ * These functions are removed for the ACPI_REDUCED_HARDWARE case:
+ * acpi_enter_sleep_state_s4bios
+ */
+
+#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_s4bios
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 3efb46f0dc54..366d54a1d157 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 7e5a683ae957..f05a92b88642 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 90a26cb0c472..6dc20486ad51 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index fa116ebe49a3..d5b16aaec233 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 86d126fdb27d..03373e7f7978 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index fcb9de0f77a2..6ec4c646fff7 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d91153f65700..22aeeeb56cff 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -194,7 +194,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 31e551cf4ea6..959e6379bc4c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index cf57bd69616d..81995ee48c49 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index dd37fc108fce..ca137ce5674f 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index b8657004190d..accfdcfb7e62 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 1bb7b71f07f1..8dbb870f40d2 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
acpi_repair_function repair_function;
} acpi_repair_info;
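ACPI name segments are fixed 4-byte arrays with no NUL terminator, so newer GCC warns when they are treated as C strings; ACPI_NONSTRING presumably expands to the compiler's nonstring attribute to document that. A sketch of such a definition (the version guard and fallback are assumptions, not the actual ACPICA header):

	#if defined(__GNUC__) && (__GNUC__ >= 8)
	#define ACPI_NONSTRING __attribute__((__nonstring__))
	#else
	#define ACPI_NONSTRING
	#endif

	char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;	/* 4 bytes, no NUL */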
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 06ffdb6808f5..49cc07e2ac5a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index eee396a77bae..5670ff5a43cd 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -169,9 +169,12 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
- if (!start_node) {
- return_ACPI_STATUS(AE_NO_NAMESPACE);
- }
+ }
+
+ /* Avoid walking the namespace if the StartNode is NULL */
+
+ if (!start_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
}
/* Null child means "get first node" */
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 5d5bcf165298..1db831545ec8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 422c074ed289..6f6ae38ec044 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state);
+static void acpi_ps_free_field_list(union acpi_parse_object *start);
+
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_next_package_length
@@ -685,6 +687,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
/*******************************************************************************
*
+ * FUNCTION: acpi_ps_free_field_list
+ *
+ * PARAMETERS: start - First Op in field list
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Free all Op objects inside a field list.
+ *
+ ******************************************************************************/
+
+static void acpi_ps_free_field_list(union acpi_parse_object *start)
+{
+ union acpi_parse_object *cur = start;
+ union acpi_parse_object *next;
+ union acpi_parse_object *arg;
+
+ while (cur) {
+ next = cur->common.next;
+
+ /* AML_INT_CONNECTION_OP can have a single argument */
+
+ arg = acpi_ps_get_arg(cur, 0);
+ if (arg) {
+ acpi_ps_free_op(arg);
+ }
+
+ acpi_ps_free_op(cur);
+ cur = next;
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ps_get_next_arg
*
* PARAMETERS: walk_state - Current state
@@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
while (parser_state->aml < parser_state->pkg_end) {
field = acpi_ps_get_next_field(parser_state);
if (!field) {
+ if (arg) {
+ acpi_ps_free_field_list(arg);
+ }
+
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_NOT_METHOD_CALL);
+ if (ACPI_FAILURE(status)) {
+ acpi_ps_free_op(arg);
+ return_ACPI_STATUS(status);
+ }
} else {
/* Single complex argument, nothing returned */
@@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_POSSIBLE_METHOD_CALL);
+ if (ACPI_FAILURE(status)) {
+ acpi_ps_free_op(arg);
+ return_ACPI_STATUS(status);
+ }
if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
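These three hunks share one leak-fix pattern: when building a linked list of Ops and a later step fails, the partially built list must be torn down before the error is propagated. The generic shape, with more_fields() and append_field() as illustrative helpers:

	while (more_fields(parser_state)) {
		field = acpi_ps_get_next_field(parser_state);
		if (!field) {
			if (arg) {
				acpi_ps_free_field_list(arg); /* drop partial list */
			}
			return_ACPI_STATUS(AE_NO_MEMORY);
		}
		append_field(&arg, field);
	}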
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index d0fd55636129..c989cadf271c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 54471083ba54..496a1c1d5b0b 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -636,7 +636,8 @@ acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status)
{
- acpi_status status2;
+ acpi_status return_status = status;
+ u8 ascending = TRUE;
ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
op));
do {
if (op) {
- if (walk_state->ascending_callback != NULL) {
+ if (ascending && walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.
@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
}
if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = AE_CTRL_TERMINATE;
}
else if (ACPI_FAILURE(status)) {
/* First error is most important */
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = status;
}
}
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
+ status = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ascending = FALSE;
+ if (ACPI_SUCCESS(return_status) ||
+ return_status == AE_CTRL_TERMINATE) {
+ return_status = status;
+ }
}
}
@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
} while (op);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(return_status);
}
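The rewrite collapses the three early-return unwind paths of acpi_ps_complete_final_op into a single loop: 'ascending' stops further ascending callbacks once a terminate or failure is seen, while 'return_status' latches the first meaningful status and the scope stack is still popped all the way down. A compilable standalone model of that control flow (all helper names are illustrative):

	#include <stdbool.h>

	struct op;				/* opaque, illustrative */

	enum st { ST_OK, ST_TERMINATE, ST_FAIL };

	extern enum st ascending_callback(struct op *op);
	extern enum st complete_this_op(struct op *op);
	extern struct op *pop_scope(void);

	enum st complete_final_op_model(struct op *op)
	{
		enum st return_status = ST_OK, status;
		bool ascending = true;

		do {
			if (op && ascending) {
				status = ascending_callback(op);
				if (status != ST_OK) {
					ascending = false;	/* no more callbacks */
					return_status = status;	/* latch first status */
				}
			}
			status = complete_this_op(op);
			if (status == ST_FAIL && return_status != ST_FAIL) {
				ascending = false;
				return_status = status;	/* first error wins */
			}
			op = pop_scope();
		} while (op);

		return return_status;
	}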
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 39e31030e5f4..bf6103986f48 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index bccf606e08b4..532ea307a675 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
const char *opcode_name = "Unknown AML opcode";
#endif
@@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
+#endif
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 10a072953d78..55a416e56fd8 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a0035bde7556..c4e4483f0a0b 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 7f7f5ecd4011..5a285d3f2cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index d550c4af4702..ada1dc304d25 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index d92817c72b8d..2f3ebcd8aebe 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6f4eace0ba69..d480de075a90 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index fff48001d7ef..f92010e667cd 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -272,17 +272,13 @@ u8
acpi_rs_get_address_common(struct acpi_resource *resource,
union aml_resource *aml)
{
- struct aml_resource_address address;
-
ACPI_FUNCTION_ENTRY();
- /* Avoid undefined behavior: member access within misaligned address */
-
- memcpy(&address, aml, sizeof(address));
-
/* Validate the Resource Type */
- if ((address.resource_type > 2) && (address.resource_type < 0xC0)) {
+ if ((aml->address.resource_type > 2) &&
+ (aml->address.resource_type < 0xC0) &&
+ (aml->address.resource_type != 0x0A)) {
return (FALSE);
}
@@ -303,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Generic resource type, just grab the type_specific byte */
resource->data.address.info.type_specific =
- address.specific_flags;
+ aml->address.specific_flags;
}
return (TRUE);
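With the memcpy staging buffer removed, the validation reads the packed AML resource directly; the accepted resource_type values are now 0-2, 0x0A, and the hardware-vendor range 0xC0-0xFF. The check, restated as a predicate (sketch):

	#include <stdbool.h>

	static bool address_resource_type_is_valid(unsigned char type)
	{
		return type <= 2 ||	/* Memory, I/O, Bus Number ranges */
		       type == 0x0A ||	/* newly accepted by this change */
		       type >= 0xC0;	/* hardware-vendor defined */
	}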
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 6e7a152d6459..242daf45e20e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer,
case ACPI_RESOURCE_NAME_SERIAL_BUS:{
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus
- common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
minimum_aml_resource_length =
acpi_gbl_resource_aml_serial_bus_sizes
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
extra_struct_bytes +=
- common_serial_bus.resource_length -
+ aml_resource->common_serial_bus.
+ resource_length -
minimum_aml_resource_length;
break;
}
@@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
*/
if (acpi_ut_get_resource_type(aml_buffer) ==
ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
buffer_size =
acpi_gbl_resource_struct_serial_bus_sizes
- [common_serial_bus.type] + extra_struct_bytes;
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
} else {
buffer_size =
acpi_gbl_resource_struct_sizes[resource_index] +
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 611bc71c193f..5b7d7074ce4f 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -48,6 +48,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
+#ifdef ACPI_DEBUGGER
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_resource_list
@@ -160,6 +161,7 @@ void acpi_rs_dump_irq_list(u8 *route_table)
prt_element, prt_element->length);
}
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 164c96e063c6..e46efaa889cd 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
- if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
conversion_table = NULL;
} else {
/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
conversion_table =
acpi_gbl_convert_resource_serial_bus_dispatch
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
}
} else {
conversion_table =
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index a1f10e4409a3..5b98e09fff76 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 3c126c6d306b..c6658b2f3027 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 1c1b2e284bd9..d71a73216380 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -57,8 +57,8 @@ acpi_tb_find_table(char *signature,
memset(&header, 0, sizeof(struct acpi_table_header));
ACPI_COPY_NAMESEG(header.signature, signature);
- strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
- strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ memcpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
/* Search for the table */
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0dc003c20e4d..ee9b85bc238b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 58b02e4b254b..e5631027f7f1 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 11
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -121,6 +126,14 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
local_header.oem_id));
+ } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) {
+
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address),
+ ACPI_CAST_PTR(struct acpi_table_cdat,
+ header)->length));
} else {
/* Standard ACPI table with full common header */
@@ -135,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
+#pragma GCC diagnostic pop
}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 15fa68a5ea6e..fa64851c7b62 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -18,7 +18,6 @@ ACPI_MODULE_NAME("tbutils")
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
-#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_tb_initialize_facs
@@ -56,7 +55,6 @@ acpi_status acpi_tb_initialize_facs(void)
return (AE_OK);
}
-#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 275b52dc42e9..a8f07d2641b6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0f2a7343de3a..2a17c60a9a39 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 5b413bbab338..961577ba9486 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index be94d2fd99a7..c673d6c95e0a 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index c1fb70457e20..2418a312733a 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 2be37676edd7..259c28d3fecd 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index b054bb5eeaf0..f6e6e98e9523 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 85a85f7cf750..cabec193febb 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -251,9 +251,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
} else {
/* The cache is empty, create a new object */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_MEM_TRACKING(cache->total_allocated++);
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
if ((cache->total_allocated - cache->total_freed) >
cache->max_occupied) {
cache->max_occupied =
diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c
index b483894c3629..e6f6030b3a3f 100644
--- a/drivers/acpi/acpica/utcksum.c
+++ b/drivers/acpi/acpica/utcksum.c
@@ -3,7 +3,7 @@
*
* Module Name: utcksum - Support generating table checksums
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 2e17e657dfa4..80458e70ac2b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 3d71bd9245c7..9f197e293c7e 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 95a4b7509e01..b82130d1a8bc 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 8d7736d2d269..e8180099d01f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -140,7 +140,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
(void)
acpi_os_delete_semaphore
(acpi_gbl_global_lock_semaphore);
- acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL;
acpi_os_delete_mutex(object->mutex.os_mutex);
acpi_gbl_global_lock_mutex = NULL;
@@ -157,7 +157,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
object, object->event.os_semaphore));
(void)acpi_os_delete_semaphore(object->event.os_semaphore);
- object->event.os_semaphore = NULL;
+ object->event.os_semaphore = ACPI_SEMAPHORE_NULL;
break;
case ACPI_TYPE_METHOD:
@@ -404,7 +404,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
- message = "Incremement";
+ message = "Increment";
break;
case REF_DECREMENT:
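This hunk, together with the matching utinit.c change further down, replaces NULL with ACPI_SEMAPHORE_NULL when invalidating semaphore handles. The point is host portability: acpi_semaphore is an OS-layer type, and on hosts where it is not a pointer (an integer handle, say), assigning NULL is not well-typed. A host header can then define the "no semaphore" value to match; a sketch with a hypothetical configuration knob:

    #ifdef HOST_USES_INTEGER_HANDLES        /* hypothetical host knob */
    typedef unsigned int acpi_semaphore;
    #define ACPI_SEMAPHORE_NULL     ((acpi_semaphore)0)
    #else                                   /* pointer-based hosts, e.g. Linux */
    typedef void *acpi_semaphore;
    #define ACPI_SEMAPHORE_NULL     NULL
    #endif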
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 3e5173d03953..abc6583ed369 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 820820ea8119..97c55a113bae 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index e62802791dcf..8cd050e9cad5 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 15c2ce91d403..eb88335dea2c 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 92fbaef161a7..4bef97e8223a 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -154,7 +154,7 @@ acpi_status acpi_ut_init_globals(void)
/* Global Lock support */
- acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL;
acpi_gbl_global_lock_mutex = NULL;
acpi_gbl_global_lock_acquired = FALSE;
acpi_gbl_global_lock_handle = 0;
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index ee6d72385c5c..123dbcbc60bc 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index f4aae8f0d3a8..272e46208263 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 251bd396c6fd..f6ac16729e42 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -75,6 +75,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */
{"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */
{"Windows 2021", NULL, 0, ACPI_OSI_WIN_11}, /* Windows 11 - Added 01/2022 */
+ {"Windows 2022", NULL, 0, ACPI_OSI_WIN_11_22H2}, /* Windows 11 version 22H2 - Added 04/2024 */
/* Feature Group Strings */
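The utosi.c hunk registers "Windows 2022", so firmware that probes host capabilities via _OSI sees Windows 11 22H2 reported as supported. Conceptually the table drives a simple name lookup; a minimal sketch of the matching step (not the actual ACPICA implementation, which also handles dynamically added interfaces and the interface handler):

    static bool osi_string_supported(const char *name)
    {
            u32 i;

            for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces); i++) {
                    if (!strcmp(name, acpi_default_supported_interfaces[i].name))
                            return true;
            }
            return false;
    }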
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 29d2977d0746..d9bd80e2d32a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 42b30b9f9312..423d10569736 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
pos = string;
- if (size != ACPI_UINT32_MAX) {
- end = string + size;
- } else {
- end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
- }
+ size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
+ end = string + size;
for (; *format; ++format) {
if (*format != '%') {
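The utprint.c change replaces the old ACPI_UINT32_MAX special case with an unconditional clamp: size is limited so that string + size cannot wrap past the highest representable pointer, and end is computed once. The same guard expressed in portable C, using stdint types instead of the ACPI_* macros:

    #include <stdint.h>
    #include <stddef.h>

    /* Clamp a requested length so buf + len cannot overflow pointer space;
     * mirrors ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string)). */
    static size_t clamp_buffer_len(const char *buf, size_t len)
    {
            size_t room = (size_t)(UINTPTR_MAX - (uintptr_t)buf);

            return len < room ? len : room;
    }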
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cff7901f7866..e1cc3d348750 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
/* Validate the bus_type field */
- if ((common_serial_bus.type == 0) ||
- (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- common_serial_bus.type));
+ aml_resource->common_serial_bus.
+ type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
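The utresrc.c hunk removes the memcpy-to-local workaround and goes back to reading common_serial_bus fields through the union pointer. The copy existed because AML resource descriptors are byte-packed in the raw stream, and a direct member access at a misaligned address is undefined behavior in standard C; ACPICA's aml_resource structures are declared packed, which is presumably why direct access is considered safe again here. For reference, the general shape of the removed pattern (generic sketch, names hypothetical):

    /* Read one byte-sized field from a possibly misaligned, byte-packed
     * stream without relying on packed-struct member access: */
    static u8 read_field_u8(const void *stream, size_t offset)
    {
            u8 v;

            memcpy(&v, (const u8 *)stream + offset, sizeof(v));
            return v;
    }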
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index f5f5da441458..a99c4c9e3d39 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 8f10b413e928..0682554934ca 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index aa2e923462b7..56942b5f026b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 1915bec2b279..c1702f8fba67 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -120,6 +120,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags)
*/
acpi_gbl_early_initialization = FALSE;
+ /*
+ * Obtain a permanent mapping for the FACS. This is required for the
+ * Global Lock and the Firmware Waking Vector
+ */
+ if (!(flags & ACPI_NO_FACS_INIT)) {
+ status = acpi_tb_initialize_facs();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+ return_ACPI_STATUS(status);
+ }
+ }
+
#if (!ACPI_REDUCED_HARDWARE)
/* Enable ACPI mode */
@@ -138,18 +150,6 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags)
}
/*
- * Obtain a permanent mapping for the FACS. This is required for the
- * Global Lock and the Firmware Waking Vector
- */
- if (!(flags & ACPI_NO_FACS_INIT)) {
- status = acpi_tb_initialize_facs();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
- return_ACPI_STATUS(status);
- }
- }
-
- /*
* Initialize ACPI Event handling (Fixed and General Purpose)
*
* Note1: We must have the hardware and events initialized before we can
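The utxfinit.c hunk moves the FACS mapping ahead of the (!ACPI_REDUCED_HARDWARE) block in acpi_enable_subsystem(), so the table is mapped before ACPI mode is enabled and event handling is initialized rather than after. Since the FACS carries the Global Lock and the Firmware Waking Vector, mapping it first makes those facilities available to everything that follows; the error path is unchanged, it simply runs earlier. In outline:

    /* Resulting order inside acpi_enable_subsystem() (sketch): */
    acpi_gbl_early_initialization = FALSE;

    if (!(flags & ACPI_NO_FACS_INIT)) {
            status = acpi_tb_initialize_facs();     /* 1: map the FACS */
            if (ACPI_FAILURE(status))
                    return_ACPI_STATUS(status);
    }
    #if (!ACPI_REDUCED_HARDWARE)
            /* 2: enable ACPI mode, then initialize event handling */
    #endif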
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3cfe7e7475f2..070c07d68dfb 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -23,6 +23,7 @@ config ACPI_APEI_GHES
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
+ select ARM_SDE_INTERFACE if ARM64
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index c7c26872f4ce..9c84f3da7c09 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -28,7 +28,7 @@
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <acpi/apei.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "apei-internal.h"
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cd2766c69d78..77c10a7a7a9f 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -131,7 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
int apei_osc_setup(void);
-int einj_get_available_error_type(u32 *type);
+int einj_get_available_error_type(u32 *type, int einj_action);
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4);
int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
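The apei-internal.h hunk widens einj_get_available_error_type() with an einj_action argument because EINJv2 introduces a second "get error type" action alongside the original one; bit 30 of the legacy capability word (ACPI65_EINJV2_SUPP in the einj-core.c diff below) advertises whether the v2 query is worth issuing. Callers now name the ACPI action explicitly; a typical call for the legacy capability bitmap, as the einj-cxl.c hunk further below does:

    u32 types = 0;
    int rc;

    /* Query the legacy capability bitmap explicitly */
    rc = einj_get_available_error_type(&types, ACPI_EINJ_GET_ERROR_TYPE);
    if (rc)
            return rc;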
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 9515bcfe5e97..305c240a303f 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -21,8 +21,8 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <linux/platform_device.h>
-#include <asm/unaligned.h>
+#include <linux/device/faux.h>
+#include <linux/unaligned.h>
#include "apei-internal.h"
@@ -33,6 +33,8 @@
#define SLEEP_UNIT_MAX 5000 /* 5ms */
/* Firmware should respond within 1 seconds */
#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC)
+#define COMPONENT_LEN 16
+#define ACPI65_EINJV2_SUPP BIT(30)
#define ACPI5_VENDOR_BIT BIT(31)
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -49,6 +51,28 @@
*/
static int acpi5;
+struct syndrome_array {
+ union {
+ u8 acpi_id[COMPONENT_LEN];
+ u8 device_id[COMPONENT_LEN];
+ u8 pcie_sbdf[COMPONENT_LEN];
+ u8 vendor_id[COMPONENT_LEN];
+ } comp_id;
+ union {
+ u8 proc_synd[COMPONENT_LEN];
+ u8 mem_synd[COMPONENT_LEN];
+ u8 pcie_synd[COMPONENT_LEN];
+ u8 vendor_synd[COMPONENT_LEN];
+ } comp_synd;
+};
+
+struct einjv2_extension_struct {
+ u32 length;
+ u16 revision;
+ u16 component_arr_count;
+ struct syndrome_array component_arr[] __counted_by(component_arr_count);
+};
+
struct set_error_type_with_address {
u32 type;
u32 vendor_extension;
@@ -57,11 +81,13 @@ struct set_error_type_with_address {
u64 memory_address;
u64 memory_address_range;
u32 pcie_sbdf;
+ struct einjv2_extension_struct einjv2_struct;
};
enum {
SETWA_FLAGS_APICID = 1,
SETWA_FLAGS_MEM = 2,
SETWA_FLAGS_PCIE_SBDF = 4,
+ SETWA_FLAGS_EINJV2 = 8,
};
/*
@@ -83,6 +109,11 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 max_nr_components;
+static u32 available_error_type;
+static u32 available_error_type_v2;
+static struct syndrome_array *syndrome_data;
+
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -149,7 +180,10 @@ static DEFINE_MUTEX(einj_mutex);
*/
bool einj_initialized __ro_after_init;
-static void *einj_param;
+static void __iomem *einj_param;
+static u32 v5param_size;
+static u32 v66param_size;
+static bool is_v2;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
@@ -157,13 +191,13 @@ static void einj_exec_ctx_init(struct apei_exec_context *ctx)
EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
-static int __einj_get_available_error_type(u32 *type)
+static int __einj_get_available_error_type(u32 *type, int einj_action)
{
struct apei_exec_context ctx;
int rc;
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ rc = apei_exec_run(&ctx, einj_action);
if (rc)
return rc;
*type = apei_exec_ctx_get_output(&ctx);
@@ -172,17 +206,34 @@ static int __einj_get_available_error_type(u32 *type)
}
/* Get error injection capabilities of the platform */
-int einj_get_available_error_type(u32 *type)
+int einj_get_available_error_type(u32 *type, int einj_action)
{
int rc;
mutex_lock(&einj_mutex);
- rc = __einj_get_available_error_type(type);
+ rc = __einj_get_available_error_type(type, einj_action);
mutex_unlock(&einj_mutex);
return rc;
}
+static int einj_get_available_error_types(u32 *type1, u32 *type2)
+{
+ int rc;
+
+ rc = einj_get_available_error_type(type1, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (*type1 & ACPI65_EINJV2_SUPP) {
+ rc = einj_get_available_error_type(type2,
+ ACPI_EINJV2_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int einj_timedout(u64 *t)
{
if ((s64)*t < SLEEP_UNIT_MIN) {
@@ -214,24 +265,44 @@ static void check_vendor_extension(u64 paddr,
struct set_error_type_with_address *v5param)
{
int offset = v5param->vendor_extension;
- struct vendor_error_type_extension *v;
+ struct vendor_error_type_extension v;
+ struct vendor_error_type_extension __iomem *p;
u32 sbdf;
if (!offset)
return;
- v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
- if (!v)
+ p = acpi_os_map_iomem(paddr + offset, sizeof(*p));
+ if (!p)
return;
- get_oem_vendor_struct(paddr, offset, v);
- sbdf = v->pcie_sbdf;
+ memcpy_fromio(&v, p, sizeof(v));
+ get_oem_vendor_struct(paddr, offset, &v);
+ sbdf = v.pcie_sbdf;
sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
sbdf >> 24, (sbdf >> 16) & 0xff,
(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
- v->vendor_id, v->device_id, v->rev_id);
- acpi_os_unmap_iomem(v, sizeof(*v));
+ v.vendor_id, v.device_id, v.rev_id);
+ acpi_os_unmap_iomem(p, sizeof(v));
}
-static void *einj_get_parameter_address(void)
+static u32 einjv2_init(struct einjv2_extension_struct *e)
+{
+ if (e->revision != 1) {
+ pr_info("Unknown v2 extension revision %u\n", e->revision);
+ return 0;
+ }
+ if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
+ pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length);
+ return 0;
+ }
+ if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) {
+ pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length);
+ return 0;
+ }
+
+ return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]);
+}
+
+static void __iomem *einj_get_parameter_address(void)
{
int i;
u64 pa_v4 = 0, pa_v5 = 0;
@@ -252,26 +323,43 @@ static void *einj_get_parameter_address(void)
entry++;
}
if (pa_v5) {
- struct set_error_type_with_address *v5param;
+ struct set_error_type_with_address v5param;
+ struct set_error_type_with_address __iomem *p;
- v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param));
- if (v5param) {
+ v5param_size = sizeof(v5param);
+ p = acpi_os_map_iomem(pa_v5, sizeof(*p));
+ if (p) {
+ memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
- check_vendor_extension(pa_v5, v5param);
- return v5param;
+ check_vendor_extension(pa_v5, &v5param);
+ if (available_error_type & ACPI65_EINJV2_SUPP) {
+ struct einjv2_extension_struct *e;
+
+ e = &v5param.einjv2_struct;
+ max_nr_components = einjv2_init(e);
+
+ /* remap including einjv2_extension_struct */
+ acpi_os_unmap_iomem(p, v5param_size);
+ v66param_size = v5param_size - sizeof(*e) + e->length;
+ p = acpi_os_map_iomem(pa_v5, v66param_size);
+ }
+
+ return p;
}
}
if (param_extension && pa_v4) {
- struct einj_parameter *v4param;
+ struct einj_parameter v4param;
+ struct einj_parameter __iomem *p;
- v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param));
- if (!v4param)
+ p = acpi_os_map_iomem(pa_v4, sizeof(*p));
+ if (!p)
return NULL;
- if (v4param->reserved1 || v4param->reserved2) {
- acpi_os_unmap_iomem(v4param, sizeof(*v4param));
+ memcpy_fromio(&v4param, p, sizeof(v4param));
+ if (v4param.reserved1 || v4param.reserved2) {
+ acpi_os_unmap_iomem(p, sizeof(v4param));
return NULL;
}
- return v4param;
+ return p;
}
return NULL;
@@ -317,7 +405,8 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u64 param1, u64 param2)
{
- struct acpi_einj_trigger *trigger_tab = NULL;
+ struct acpi_einj_trigger trigger_tab;
+ struct acpi_einj_trigger *full_trigger_tab;
struct apei_exec_context trigger_ctx;
struct apei_resources trigger_resources;
struct acpi_whea_header *trigger_entry;
@@ -325,54 +414,60 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u32 table_size;
int rc = -EIO;
struct acpi_generic_address *trigger_param_region = NULL;
+ struct acpi_einj_trigger __iomem *p = NULL;
- r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ r = request_mem_region(trigger_paddr, sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr +
- sizeof(*trigger_tab) - 1);
+ sizeof(trigger_tab) - 1);
goto out;
}
- trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
- if (!trigger_tab) {
+ p = ioremap_cache(trigger_paddr, sizeof(*p));
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_header;
}
- rc = einj_check_trigger_header(trigger_tab);
+ memcpy_fromio(&trigger_tab, p, sizeof(trigger_tab));
+ rc = einj_check_trigger_header(&trigger_tab);
if (rc) {
pr_warn(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
/* No action structures in the TRIGGER_ERROR table, nothing to do */
- if (!trigger_tab->entry_count)
+ if (!trigger_tab.entry_count)
goto out_rel_header;
rc = -EIO;
- table_size = trigger_tab->table_size;
- r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab),
+ table_size = trigger_tab.table_size;
+ full_trigger_tab = kmalloc(table_size, GFP_KERNEL);
+ if (!full_trigger_tab)
+ goto out_rel_header;
+ r = request_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
- (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + sizeof(trigger_tab),
(unsigned long long)trigger_paddr + table_size - 1);
- goto out_rel_header;
+ goto out_free_trigger_tab;
}
- iounmap(trigger_tab);
- trigger_tab = ioremap_cache(trigger_paddr, table_size);
- if (!trigger_tab) {
+ iounmap(p);
+ p = ioremap_cache(trigger_paddr, table_size);
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_entry;
}
+ memcpy_fromio(full_trigger_tab, p, table_size);
trigger_entry = (struct acpi_whea_header *)
- ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ ((char *)full_trigger_tab + sizeof(struct acpi_einj_trigger));
apei_resources_init(&trigger_resources);
apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
ARRAY_SIZE(einj_ins_type),
- trigger_entry, trigger_tab->entry_count);
+ trigger_entry, trigger_tab.entry_count);
rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
if (rc)
goto out_fini;
@@ -390,7 +485,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
apei_resources_init(&addr_resources);
trigger_param_region = einj_get_trigger_parameter_region(
- trigger_tab, param1, param2);
+ full_trigger_tab, param1, param2);
if (trigger_param_region) {
rc = apei_resources_add(&addr_resources,
trigger_param_region->address,
@@ -419,23 +514,34 @@ out_release:
out_fini:
apei_resources_fini(&trigger_resources);
out_rel_entry:
- release_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab));
+out_free_trigger_tab:
+ kfree(full_trigger_tab);
out_rel_header:
- release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr, sizeof(trigger_tab));
out:
- if (trigger_tab)
- iounmap(trigger_tab);
+ if (p)
+ iounmap(p);
return rc;
}
+static bool is_end_of_list(u8 *val)
+{
+ for (int i = 0; i < COMPONENT_LEN; ++i) {
+ if (val[i] != 0xFF)
+ return false;
+ }
+ return true;
+}
static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
struct apei_exec_context ctx;
+ u32 param_size = is_v2 ? v66param_size : v5param_size;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
- int rc;
+ int i, rc;
einj_exec_ctx_init(&ctx);
@@ -444,8 +550,13 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
apei_exec_ctx_set_input(&ctx, type);
if (acpi5) {
- struct set_error_type_with_address *v5param = einj_param;
+ struct set_error_type_with_address *v5param;
+
+ v5param = kmalloc(param_size, GFP_KERNEL);
+ if (!v5param)
+ return -ENOMEM;
+ memcpy_fromio(v5param, einj_param, param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
switch (vendor_flags) {
@@ -465,8 +576,21 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
v5param->flags = flags;
v5param->memory_address = param1;
v5param->memory_address_range = param2;
- v5param->apicid = param3;
- v5param->pcie_sbdf = param4;
+
+ if (is_v2) {
+ for (i = 0; i < max_nr_components; i++) {
+ if (is_end_of_list(syndrome_data[i].comp_id.acpi_id))
+ break;
+ v5param->einjv2_struct.component_arr[i].comp_id =
+ syndrome_data[i].comp_id;
+ v5param->einjv2_struct.component_arr[i].comp_synd =
+ syndrome_data[i].comp_synd;
+ }
+ v5param->einjv2_struct.component_arr_count = i;
+ } else {
+ v5param->apicid = param3;
+ v5param->pcie_sbdf = param4;
+ }
} else {
switch (type) {
case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -490,15 +614,19 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
break;
}
}
+ memcpy_toio(einj_param, v5param, param_size);
+ kfree(v5param);
} else {
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
if (rc)
return rc;
if (einj_param) {
- struct einj_parameter *v4param = einj_param;
+ struct einj_parameter v4param;
- v4param->param1 = param1;
- v4param->param2 = param2;
+ memcpy_fromio(&v4param, einj_param, sizeof(v4param));
+ v4param.param1 = param1;
+ v4param.param2 = param2;
+ memcpy_toio(einj_param, &v4param, sizeof(v4param));
}
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
@@ -541,6 +669,43 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
}
+/* Allow almost all types of address except MMIO. */
+static bool is_allowed_range(u64 base_addr, u64 size)
+{
+ int i;
+ /*
+ * MMIO region is usually claimed with IORESOURCE_MEM + IORES_DESC_NONE.
+ * However, IORES_DESC_NONE is treated like a wildcard when we check if
+ * region intersects with known resource. So do an allow list check for
+ * IORES_DESCs that definitely or most likely not MMIO.
+ */
+ int non_mmio_desc[] = {
+ IORES_DESC_CRASH_KERNEL,
+ IORES_DESC_ACPI_TABLES,
+ IORES_DESC_ACPI_NV_STORAGE,
+ IORES_DESC_PERSISTENT_MEMORY,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */
+ IORES_DESC_RESERVED,
+ IORES_DESC_SOFT_RESERVED,
+ };
+
+ if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ == REGION_INTERSECTS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) {
+ if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i])
+ == REGION_INTERSECTS)
+ return true;
+ }
+
+ if (arch_is_platform_page(base_addr))
+ return true;
+
+ return false;
+}
+
/* Inject the specified hardware error */
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4)
@@ -549,10 +714,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
- if (flags && (flags &
- ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+ if (flags && (flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM |
+ SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2)))
return -EINVAL;
+ /* check if type is a valid EINJv2 error type */
+ if (is_v2) {
+ if (!(type & available_error_type_v2))
+ return -EINVAL;
+ }
/*
* We need extra sanity checks for memory errors.
* Other types leap directly to injection.
@@ -582,19 +752,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
* better granularity and that target address is normal RAM or
- * NVDIMM.
+ * as long as is not MMIO.
*/
base_addr = param1 & param2;
size = ~param2 + 1;
- if (((param2 & PAGE_MASK) != PAGE_MASK) ||
- ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
- != REGION_INTERSECTS) &&
- !arch_is_platform_page(base_addr)))
+ if ((param2 & PAGE_MASK) != PAGE_MASK)
+ return -EINVAL;
+
+ if (!is_allowed_range(base_addr, size))
return -EINVAL;
if (is_zero_pfn(base_addr >> PAGE_SHIFT))
@@ -630,6 +796,8 @@ static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
+static char einj_buf[32];
+static bool einj_v2_enabled;
static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(0), "Processor Correctable" },
{ BIT(1), "Processor Uncorrectable non-fatal" },
@@ -646,29 +814,35 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(31), "Vendor Defined Error Types" },
};
+static struct { u32 mask; const char *str; } const einjv2_error_type_string[] = {
+ { BIT(0), "EINJV2 Processor Error" },
+ { BIT(1), "EINJV2 Memory Error" },
+ { BIT(2), "EINJV2 PCI Express Error" },
+};
+
static int available_error_type_show(struct seq_file *m, void *v)
{
- int rc;
- u32 error_type = 0;
- rc = einj_get_available_error_type(&error_type);
- if (rc)
- return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (error_type & einj_error_type_string[pos].mask)
+ if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
-
+ if ((available_error_type & ACPI65_EINJV2_SUPP) && einj_v2_enabled) {
+ for (int pos = 0; pos < ARRAY_SIZE(einjv2_error_type_string); pos++) {
+ if (available_error_type_v2 & einjv2_error_type_string[pos].mask)
+ seq_printf(m, "V2_0x%08x\t%s\n", einjv2_error_type_string[pos].mask,
+ einjv2_error_type_string[pos].str);
+ }
+ }
return 0;
}
DEFINE_SHOW_ATTRIBUTE(available_error_type);
-static int error_type_get(void *data, u64 *val)
+static ssize_t error_type_get(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- *val = error_type;
-
- return 0;
+ return simple_read_from_buffer(buf, count, ppos, einj_buf, strlen(einj_buf));
}
bool einj_is_cxl_error_type(u64 type)
@@ -678,8 +852,7 @@ bool einj_is_cxl_error_type(u64 type)
int einj_validate_error_type(u64 type)
{
- u32 tval, vendor, available_error_type = 0;
- int rc;
+ u32 tval, vendor;
/* Only low 32 bits for error type are valid */
if (type & GENMASK_ULL(63, 32))
@@ -695,20 +868,36 @@ int einj_validate_error_type(u64 type)
/* Only one error type can be specified */
if (tval & (tval - 1))
return -EINVAL;
- if (!vendor) {
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
- if (!(type & available_error_type))
+ if (!vendor)
+ if (!(type & (available_error_type | available_error_type_v2)))
return -EINVAL;
- }
return 0;
}
-static int error_type_set(void *data, u64 val)
+static ssize_t error_type_set(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
+ u64 val;
+
+ /* Leave the last character for the NUL terminator */
+ if (count > sizeof(einj_buf) - 1)
+ return -EINVAL;
+
+ memset(einj_buf, 0, sizeof(einj_buf));
+ if (copy_from_user(einj_buf, buf, count))
+ return -EFAULT;
+
+ if (strncmp(einj_buf, "V2_", 3) == 0) {
+ if (!sscanf(einj_buf, "V2_%llx", &val))
+ return -EINVAL;
+ is_v2 = true;
+ } else {
+ if (!sscanf(einj_buf, "%llx", &val))
+ return -EINVAL;
+ is_v2 = false;
+ }
rc = einj_validate_error_type(val);
if (rc)
@@ -716,17 +905,24 @@ static int error_type_set(void *data, u64 val)
error_type = val;
- return 0;
+ return count;
}
-DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set,
- "0x%llx\n");
+static const struct file_operations error_type_fops = {
+ .read = error_type_get,
+ .write = error_type_set,
+};
static int error_inject_set(void *data, u64 val)
{
if (!error_type)
return -EINVAL;
+ if (is_v2)
+ error_flags |= SETWA_FLAGS_EINJV2;
+ else
+ error_flags &= ~SETWA_FLAGS_EINJV2;
+
return einj_error_inject(error_type, error_flags, error_param1, error_param2,
error_param3, error_param4);
}
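The error_type debugfs file switches from a DEFINE_DEBUGFS_ATTRIBUTE u64 get/set pair to raw read/write file operations so it can parse an optional "V2_" prefix: writing a plain hex value (for example "0x8") selects a legacy EINJ error type, while a "V2_"-prefixed value (for example "V2_0x2") selects an EINJv2 type and latches is_v2, which error_inject_set() then maps onto SETWA_FLAGS_EINJV2. The accepted grammar, restated as a sketch:

    u64 val;
    bool v2;

    if (sscanf(buf, "V2_%llx", &val) == 1)
            v2 = true;                      /* EINJv2 type */
    else if (sscanf(buf, "%llx", &val) == 1)
            v2 = false;                     /* legacy type */
    else
            return -EINVAL;

Note the write handler reserves the last byte of einj_buf for the NUL terminator and returns count on success, as the raw fops contract requires.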
@@ -749,17 +945,104 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
-static int __init einj_probe(struct platform_device *pdev)
+static ssize_t u128_read(struct file *f, char __user *buf, size_t count, loff_t *off)
+{
+ char output[2 * COMPONENT_LEN + 1];
+ u8 *data = f->f_inode->i_private;
+ int i;
+
+ if (*off >= sizeof(output))
+ return 0;
+
+ for (i = 0; i < COMPONENT_LEN; i++)
+ sprintf(output + 2 * i, "%.02x", data[COMPONENT_LEN - i - 1]);
+ output[2 * COMPONENT_LEN] = '\n';
+
+ return simple_read_from_buffer(buf, count, off, output, sizeof(output));
+}
+
+static ssize_t u128_write(struct file *f, const char __user *buf, size_t count, loff_t *off)
+{
+ char input[2 + 2 * COMPONENT_LEN + 2];
+ u8 *save = f->f_inode->i_private;
+ u8 tmp[COMPONENT_LEN];
+ char byte[3] = {};
+ char *s, *e;
+ ssize_t c;
+ long val;
+ int i;
+
+ /* Require that user supply whole input line in one write(2) syscall */
+ if (*off)
+ return -EINVAL;
+
+ c = simple_write_to_buffer(input, sizeof(input), off, buf, count);
+ if (c < 0)
+ return c;
+
+ if (c < 1 || input[c - 1] != '\n')
+ return -EINVAL;
+
+ /* Empty line means invalidate this entry */
+ if (c == 1) {
+ memset(save, 0xff, COMPONENT_LEN);
+ return c;
+ }
+
+ if (input[0] == '0' && (input[1] == 'x' || input[1] == 'X'))
+ s = input + 2;
+ else
+ s = input;
+ e = input + c - 1;
+
+ for (i = 0; i < COMPONENT_LEN; i++) {
+ byte[1] = *--e;
+ byte[0] = e > s ? *--e : '0';
+ if (kstrtol(byte, 16, &val))
+ return -EINVAL;
+ tmp[i] = val;
+ if (e <= s)
+ break;
+ }
+ while (++i < COMPONENT_LEN)
+ tmp[i] = 0;
+
+ memcpy(save, tmp, COMPONENT_LEN);
+
+ return c;
+}
+
+static const struct file_operations u128_fops = {
+ .read = u128_read,
+ .write = u128_write,
+};
+
+static bool setup_einjv2_component_files(void)
+{
+ char name[32];
+
+ syndrome_data = kcalloc(max_nr_components, sizeof(syndrome_data[0]), GFP_KERNEL);
+ if (!syndrome_data)
+ return false;
+
+ for (int i = 0; i < max_nr_components; i++) {
+ sprintf(name, "component_id%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_id, &u128_fops);
+ sprintf(name, "component_syndrome%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_synd, &u128_fops);
+ }
+
+ return true;
+}
+
+static int __init einj_probe(struct faux_device *fdev)
{
int rc;
acpi_status status;
struct apei_exec_context ctx;
- if (acpi_disabled) {
- pr_debug("ACPI disabled.\n");
- return -ENODEV;
- }
-
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
@@ -777,6 +1060,10 @@ static int __init einj_probe(struct platform_device *pdev)
goto err_put_table;
}
+ rc = einj_get_available_error_types(&available_error_type, &available_error_type_v2);
+ if (rc)
+ goto err_put_table;
+
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
@@ -821,6 +1108,8 @@ static int __init einj_probe(struct platform_device *pdev)
&error_param4);
debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
+ if (available_error_type & ACPI65_EINJV2_SUPP)
+ einj_v2_enabled = setup_einjv2_component_files();
}
if (vendor_dev[0]) {
@@ -851,14 +1140,19 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct platform_device *pdev)
+static void einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
if (einj_param) {
- acpi_size size = (acpi5) ?
- sizeof(struct set_error_type_with_address) :
- sizeof(struct einj_parameter);
+ acpi_size size;
+
+ if (v66param_size)
+ size = v66param_size;
+ else if (acpi5)
+ size = v5param_size;
+ else
+ size = sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
if (vendor_errors.size)
@@ -869,47 +1163,34 @@ static void __exit einj_remove(struct platform_device *pdev)
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+ kfree(syndrome_data);
acpi_put_table((struct acpi_table_header *)einj_tab);
}
-static struct platform_device *einj_dev;
-/*
- * einj_remove() lives in .exit.text. For drivers registered via
- * platform_driver_probe() this is ok because they cannot get unbound at
- * runtime. So mark the driver struct with __refdata to prevent modpost
- * triggering a section mismatch warning.
- */
-static struct platform_driver einj_driver __refdata = {
- .remove_new = __exit_p(einj_remove),
- .driver = {
- .name = "acpi-einj",
- },
+static struct faux_device *einj_dev;
+static struct faux_device_ops einj_device_ops = {
+ .probe = einj_probe,
+ .remove = einj_remove,
};
static int __init einj_init(void)
{
- struct platform_device_info einj_dev_info = {
- .name = "acpi-einj",
- .id = -1,
- };
- int rc;
+ if (acpi_disabled) {
+ pr_debug("ACPI disabled.\n");
+ return -ENODEV;
+ }
- einj_dev = platform_device_register_full(&einj_dev_info);
- if (IS_ERR(einj_dev))
- return PTR_ERR(einj_dev);
+ einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
- rc = platform_driver_probe(&einj_driver, einj_probe);
- einj_initialized = rc == 0;
+ if (einj_dev)
+ einj_initialized = true;
return 0;
}
static void __exit einj_exit(void)
{
- if (einj_initialized)
- platform_driver_unregister(&einj_driver);
-
- platform_device_del(einj_dev);
+ faux_device_destroy(einj_dev);
}
module_init(einj_init);
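The registration boilerplate at the bottom is worth a closer look: the driver moves from a platform device/driver pair to the faux device bus. There is no matching step; faux_device_create() creates the device and binds the supplied ops immediately, so a probe failure is known by the time it returns, and the old __refdata/__exit_p section-mismatch dance becomes unnecessary. The minimal shape of a faux device user (sketch; "my-faux" and the callbacks are placeholders):

    #include <linux/device/faux.h>

    static int my_probe(struct faux_device *fdev)
    {
            return 0;               /* acquire resources here */
    }

    static void my_remove(struct faux_device *fdev)
    {
            /* release resources here */
    }

    static struct faux_device_ops my_ops = {
            .probe  = my_probe,
            .remove = my_remove,
    };

    static struct faux_device *my_fdev;

    static int __init my_init(void)
    {
            my_fdev = faux_device_create("my-faux", NULL, &my_ops);
            return my_fdev ? 0 : -ENODEV;
    }

    static void __exit my_exit(void)
    {
            faux_device_destroy(my_fdev);
    }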
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index 8b8be0c90709..e70a416ec925 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -7,9 +7,9 @@
*
* Author: Ben Cheatham <benjamin.cheatham@amd.com>
*/
-#include <linux/einj-cxl.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
+#include <cxl/einj.h>
#include "apei-internal.h"
@@ -30,7 +30,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
int cxl_err, rc;
u32 available_error_type = 0;
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_type(&available_error_type, ACPI_EINJ_GET_ERROR_TYPE);
if (rc)
return rc;
@@ -45,7 +45,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, "CXL");
static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
{
@@ -63,7 +63,7 @@ static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
seg = bridge->domain_nr;
bus = pbus->number;
- *sbdf = (seg << 24) | (bus << 16) | dport_dev->devfn;
+ *sbdf = (seg << 24) | (bus << 16) | (dport_dev->devfn << 8);
return 0;
}
@@ -83,7 +83,7 @@ int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
return einj_cxl_rch_error_inject(type, 0x2, rcrb, GENMASK_ULL(63, 0),
0, 0);
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, "CXL");
int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
{
@@ -104,10 +104,10 @@ int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
return einj_error_inject(type, 0x4, 0, 0, 0, param4);
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, "CXL");
bool einj_cxl_is_initialized(void)
{
return einj_initialized;
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, "CXL");
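Two independent fixes land in einj-cxl.c. First, the SBDF encoding gains the missing devfn shift: EINJ expects segment in bits [31:24], bus in [23:16], and device/function in [15:8], so devfn must be shifted left by 8. A worked example with a hypothetical device:

    /* seg 0, bus 0x3a, device 0x1d, function 2:
     * devfn = (0x1d << 3) | 2 = 0xea
     * sbdf  = (0 << 24) | (0x3a << 16) | (0xea << 8) = 0x003aea00
     * The old code produced 0x003a00ea, with devfn in the wrong byte.
     */

Second, EXPORT_SYMBOL_NS_GPL now takes the namespace as a string literal ("CXL" instead of the bare token CXL), matching the tree-wide conversion of symbol namespaces to strings.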
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 8bc71cdc2270..ff0e8bf8e97a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -60,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
- rc = copy_from_user(&record_id, (void __user *)arg,
- sizeof(record_id));
- if (rc)
+ if (copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id)))
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
@@ -175,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
- rc = copy_from_user(erst_dbg_buf, ubuf, usize);
- if (rc) {
+ if (copy_from_user(erst_dbg_buf, ubuf, usize)) {
rc = -EFAULT;
goto out;
}
@@ -199,7 +197,6 @@ static const struct file_operations erst_dbg_ops = {
.read = erst_dbg_read,
.write = erst_dbg_write,
.unlocked_ioctl = erst_dbg_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice erst_dbg_dev = {
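The erst-dbg.c cleanups hinge on copy_from_user()'s return convention: it returns the number of bytes that could not be copied, never a negative errno, so parking that value in a signed rc variable invites the mistake of returning it directly. Testing the call and returning -EFAULT is the idiomatic shape:

    if (copy_from_user(dst, ubuf, usize))
            return -EFAULT;         /* any nonzero remainder is a fault */

The .llseek = no_llseek line is dropped because no_llseek was removed from the tree; leaving .llseek unset in file_operations now yields the same "seeking not supported" behavior.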
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 623cc0cb4a65..0dc767392a6c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -22,12 +22,12 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/cleanup.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -44,12 +44,14 @@
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>
+#include <linux/vmcore_info.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
+#include <cxl/event.h>
#include <ras/ras_event.h>
#include "apei-internal.h"
@@ -173,8 +175,6 @@ static struct gen_pool *ghes_estatus_pool;
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
-static int ghes_panic_timeout __read_mostly = 30;
-
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
@@ -466,39 +466,58 @@ static void ghes_clear_estatus(struct ghes *ghes,
ghes_ack_error(ghes->generic_v2);
}
-/*
- * Called as task_work before returning to user-space.
- * Ensure any queued work has been done before we return to the context that
- * triggered the notification.
+/**
+ * struct ghes_task_work - for synchronous RAS event
+ *
+ * @twork: callback_head for task work
+ * @pfn: page frame number of corrupted page
+ * @flags: work control flags
+ *
+ * Structure to pass task work to be handled before
+ * returning to user-space via task_work_add().
*/
-static void ghes_kick_task_work(struct callback_head *head)
+struct ghes_task_work {
+ struct callback_head twork;
+ u64 pfn;
+ int flags;
+};
+
+static void memory_failure_cb(struct callback_head *twork)
{
- struct acpi_hest_generic_status *estatus;
- struct ghes_estatus_node *estatus_node;
- u32 node_len;
+ struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork);
+ int ret;
- estatus_node = container_of(head, struct ghes_estatus_node, task_work);
- if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
- memory_failure_queue_kick(estatus_node->task_work_cpu);
+ ret = memory_failure(twcb->pfn, twcb->flags);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb));
- estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
- gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+ if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP)
+ return;
+
+ pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
+ twcb->pfn, current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
}
static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
+ struct ghes_task_work *twcb;
unsigned long pfn;
if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
return false;
pfn = PHYS_PFN(physical_addr);
- if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
- pr_warn_ratelimited(FW_WARN GHES_PFX
- "Invalid address in generic error data: %#llx\n",
- physical_addr);
- return false;
+
+ if (flags == MF_ACTION_REQUIRED && current->mm) {
+ twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
+ if (!twcb)
+ return false;
+
+ twcb->pfn = pfn;
+ twcb->flags = flags;
+ init_task_work(&twcb->twork, memory_failure_cb);
+ task_work_add(current, &twcb->twork, TWA_RESUME);
+ return true;
}
memory_failure_queue(pfn, flags);
@@ -529,26 +548,25 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
}
static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
- int sev, bool sync)
+ int sev, bool sync)
{
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
int flags = sync ? MF_ACTION_REQUIRED : 0;
+ char error_type[120];
bool queued = false;
int sec_sev, i;
char *p;
- log_arm_hw_error(err);
-
sec_sev = ghes_severity(gdata->error_severity);
+ log_arm_hw_error(err, sec_sev);
if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
return false;
p = (char *)(err + 1);
for (i = 0; i < err->err_info_num; i++) {
struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
- bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
+ bool is_cache = err_info->type & CPER_ARM_CACHE_ERROR;
bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
- const char *error_type = "unknown error";
/*
* The field (err_info->error_info & BIT(26)) is fixed to set to
@@ -562,12 +580,15 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
continue;
}
- if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
- error_type = cper_proc_error_type_strs[err_info->type];
+ cper_bits_to_str(error_type, sizeof(error_type),
+ FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
pr_warn_ratelimited(FW_WARN GHES_PFX
- "Unhandled processor error type: %s\n",
- error_type);
+ "Unhandled processor error type 0x%02x: %s%s\n",
+ err_info->type, error_type,
+ (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
p += err_info->length;
}
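Per the UEFI CPER ARM processor section, the error information type field is a bit mask, not an enum index, and one record can carry several error classes at once (from the spec: cache BIT(1), TLB BIT(2), bus BIT(3), micro-architectural BIT(4); worth verifying against cper.h). The ghes.c hunk therefore stops indexing cper_proc_error_type_strs[] with the raw value, tests the cache bit with &, and renders all set bits via cper_bits_to_str(), flagging anything outside CPER_ARM_ERR_TYPE_MASK as reserved. The old code mislabeled multi-bit values; for example:

    /* err_info->type = 0x0a = cache error (BIT(1)) | bus error (BIT(3)):
     * old: 0x0a is neither a valid array index nor equal to
     *      CPER_ARM_CACHE_ERROR, so it logged "unknown error" and
     *      missed the cache component entirely
     * new: (type & CPER_ARM_CACHE_ERROR) is true, and
     *      cper_bits_to_str() prints both error names
     */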
@@ -676,6 +697,105 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
+/* Room for 8 entries */
+#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8
+static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data,
+ CXL_CPER_PROT_ERR_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */
+static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock);
+struct work_struct *cxl_cper_prot_err_work;
+
+static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+ int severity)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cxl_cper_prot_err_work_data wd;
+ u8 *dvsec_start, *cap_start;
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+ pr_err_ratelimited("CXL CPER invalid agent type\n");
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+ return;
+ }
+
+ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+ prot_err->err_len);
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+ pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+ guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
+
+ if (!cxl_cper_prot_err_work)
+ return;
+
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err));
+
+ dvsec_start = (u8 *)(prot_err + 1);
+ cap_start = dvsec_start + prot_err->dvsec_len;
+
+ memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap));
+ wd.severity = cper_severity_to_aer(severity);
+ break;
+ default:
+ pr_err_ratelimited("CXL CPER invalid agent type: %d\n",
+ prot_err->agent_type);
+ return;
+ }
+
+ if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) {
+ pr_err_ratelimited("CXL CPER kfifo overflow\n");
+ return;
+ }
+
+ schedule_work(cxl_cper_prot_err_work);
+#endif
+}
+
+int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = work;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL");
+
+int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work != work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL");
+
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return kfifo_get(&cxl_cper_prot_err_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL");
+
/* Room for 8 entries for each of the 4 event log queues */
#define CXL_CPER_FIFO_DEPTH 32
DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
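The new CXL protocol-error path mirrors the event path just below it: GHES can run in NMI-like contexts where the CXL handling cannot be done synchronously, so records are validated, copied into a fixed-depth kfifo, and drained from workqueue context by a single consumer that the CXL driver registers. kfifo_put() fails rather than blocks when the eight-entry ring is full, which is the right trade-off inside an error handler. A sketch of a registered consumer (worker name hypothetical):

    static void my_prot_err_worker(struct work_struct *work)
    {
            struct cxl_cper_prot_err_work_data wd;

            while (cxl_cper_prot_err_kfifo_get(&wd)) {
                    /* inspect wd.prot_err, wd.ras_cap, wd.severity */
            }
    }
    static DECLARE_WORK(my_prot_err_work, my_prot_err_worker);

    /* driver init: claim the single consumer slot */
    if (cxl_cper_register_prot_err_work(&my_prot_err_work))
            return -EBUSY;          /* another consumer already registered */

    /* driver teardown: */
    cxl_cper_unregister_prot_err_work(&my_prot_err_work);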
@@ -726,7 +846,7 @@ int cxl_cper_register_work(struct work_struct *work)
cxl_cper_work = work;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL");
int cxl_cper_unregister_work(struct work_struct *work)
{
@@ -737,15 +857,49 @@ int cxl_cper_unregister_work(struct work_struct *work)
cxl_cper_work = NULL;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL");
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
return kfifo_get(&cxl_cper_fifo, wd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
+
+static void ghes_log_hwerr(int sev, guid_t *sec_type)
+{
+ if (sev != CPER_SEV_RECOVERABLE)
+ return;
+
+ if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ hwerr_log_error_type(HWERR_RECOV_CPU);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ hwerr_log_error_type(HWERR_RECOV_CXL);
+ return;
+ }
-static bool ghes_do_proc(struct ghes *ghes,
+ if (guid_equal(sec_type, &CPER_SEC_PCIE) ||
+ guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) {
+ hwerr_log_error_type(HWERR_RECOV_PCI);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ hwerr_log_error_type(HWERR_RECOV_MEMORY);
+ return;
+ }
+
+ hwerr_log_error_type(HWERR_RECOV_OTHERS);
+}
+
+static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
@@ -766,6 +920,7 @@ static bool ghes_do_proc(struct ghes *ghes,
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
+ ghes_log_hwerr(sev, sec_type);
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
@@ -773,12 +928,14 @@ static bool ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
queued = ghes_handle_memory_failure(gdata, sev, sync);
- }
- else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
ghes_handle_aer(gdata);
- }
- else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_prot_err(prot_err, gdata->error_severity);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
@@ -801,7 +958,16 @@ static bool ghes_do_proc(struct ghes *ghes,
}
}
- return queued;
+ /*
+ * If no memory failure work is queued for abnormal synchronous
+ * errors, do a force kill.
+ */
+ if (sync && !queued) {
+ dev_err(ghes->dev,
+ HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n",
+ current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
+ }
}
static void __ghes_print_estatus(const char *pfx,
@@ -983,14 +1149,18 @@ static void __ghes_panic(struct ghes *ghes,
struct acpi_hest_generic_status *estatus,
u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
+ const char *msg = GHES_PFX "Fatal hardware error";
+
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
- /* reboot to log the error! */
if (!panic_timeout)
- panic_timeout = ghes_panic_timeout;
- panic("Fatal hardware error!");
+ pr_emerg("%s but panic disabled\n", msg);
+
+ panic(msg);
}
static int ghes_proc(struct ghes *ghes)
@@ -1035,7 +1205,7 @@ static void ghes_add_timer(struct ghes *ghes)
static void ghes_poll_func(struct timer_list *t)
{
- struct ghes *ghes = from_timer(ghes, t, timer);
+ struct ghes *ghes = timer_container_of(ghes, t, timer);
unsigned long flags;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
@@ -1068,12 +1238,10 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
int ret = NOTIFY_DONE;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
- rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret;
@@ -1103,9 +1271,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- bool task_work_pending;
u32 len, node_len;
- int ret;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -1120,25 +1286,16 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
+
+ ghes_do_proc(estatus_node->ghes, estatus);
+
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
ghes_estatus_cache_add(generic, estatus);
}
-
- if (task_work_pending && current->mm) {
- estatus_node->task_work.func = ghes_kick_task_work;
- estatus_node->task_work_cpu = smp_processor_id();
- ret = task_work_add(current, &estatus_node->task_work,
- TWA_RESUME);
- if (ret)
- estatus_node->task_work.func = NULL;
- }
-
- if (!estatus_node->task_work.func)
- gen_pool_free(ghes_estatus_pool,
- (unsigned long)estatus_node, node_len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
llnode = next;
}
@@ -1199,7 +1356,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
- estatus_node->task_work.func = NULL;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
@@ -1605,14 +1761,14 @@ static struct platform_driver ghes_platform_driver = {
.name = "GHES",
},
.probe = ghes_probe,
- .remove_new = ghes_remove,
+ .remove = ghes_remove,
};
void __init acpi_ghes_init(void)
{
int rc;
- sdei_init();
+ acpi_sdei_init();
if (acpi_disabled)
return;
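The thread running through the ghes.c changes: for a synchronous error that hits a user task (MF_ACTION_REQUIRED with current->mm set), recovery now runs memory_failure() from task_work in that task's own context just before it returns to user space, replacing the old scheme of kicking a per-CPU memory-failure queue from ghes_proc_in_irq(); and if a synchronous error could not be queued at all, ghes_do_proc() force-sends SIGBUS rather than letting the task run on poisoned data. Relatedly, __ghes_panic() now taints the kernel and panics with a proper message instead of forcing a 30-second reboot timeout, and the sdei_init() call becomes acpi_sdei_init(), pairing with the ARM_SDE_INTERFACE select added in the APEI Kconfig hunk above. The task_work idiom in isolation (sketch):

    struct my_twork {
            struct callback_head twork;
            unsigned long pfn;
    };

    static void my_twork_cb(struct callback_head *head)
    {
            struct my_twork *t = container_of(head, struct my_twork, twork);

            /* runs in the faulting task's process context */
            kfree(t);
    }

    /* called from the (atomic) error handler: */
    static bool queue_to_current(unsigned long pfn)
    {
            struct my_twork *t = kmalloc(sizeof(*t), GFP_ATOMIC);

            if (!t)
                    return false;
            t->pfn = pfn;
            init_task_work(&t->twork, my_twork_cb);
            if (task_work_add(current, &t->twork, TWA_RESUME)) {
                    kfree(t);       /* task is exiting; nothing queued */
                    return false;
            }
            return true;
    }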
diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig
index b3ed6212244c..f2fd79f22e7d 100644
--- a/drivers/acpi/arm64/Kconfig
+++ b/drivers/acpi/arm64/Kconfig
@@ -21,3 +21,6 @@ config ACPI_AGDI
config ACPI_APMT
bool
+
+config ACPI_MPAM
+ bool
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 726944648c9b..9390b57cb564 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ACPI_AGDI) += agdi.o
-obj-$(CONFIG_ACPI_IORT) += iort.o
-obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
+obj-$(CONFIG_ACPI_FFH) += ffh.o
+obj-$(CONFIG_ACPI_GTDT) += gtdt.o
+obj-$(CONFIG_ACPI_IORT) += iort.o
+obj-$(CONFIG_ACPI_MPAM) += mpam.o
+obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
obj-y += thermal_cpufreq.o
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
index f5f21dd0d277..e0df3daa4abf 100644
--- a/drivers/acpi/arm64/agdi.c
+++ b/drivers/acpi/arm64/agdi.c
@@ -88,7 +88,7 @@ static struct platform_driver agdi_driver = {
.name = "agdi",
},
.probe = agdi_probe,
- .remove_new = agdi_remove,
+ .remove = agdi_remove,
};
void __init acpi_agdi_init(void)
diff --git a/drivers/acpi/arm64/amba.c b/drivers/acpi/arm64/amba.c
index 171b5c2c7edd..1350083bce5f 100644
--- a/drivers/acpi/arm64/amba.c
+++ b/drivers/acpi/arm64/amba.c
@@ -22,24 +22,12 @@
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
{"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */
- {"ARMHC501", 0}, /* ARM CoreSight ETR */
- {"ARMHC502", 0}, /* ARM CoreSight STM */
- {"ARMHC503", 0}, /* ARM CoreSight Debug */
- {"ARMHC979", 0}, /* ARM CoreSight TPIU */
- {"ARMHC97C", 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */
- {"ARMHC98D", 0}, /* ARM CoreSight Dynamic Replicator */
- {"ARMHC9CA", 0}, /* ARM CoreSight CATU */
- {"ARMHC9FF", 0}, /* ARM CoreSight Dynamic Funnel */
{"", 0},
};
static void amba_register_dummy_clk(void)
{
- static struct clk *amba_dummy_clk;
-
- /* If clock already registered */
- if (amba_dummy_clk)
- return;
+ struct clk *amba_dummy_clk;
amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
diff --git a/drivers/acpi/arm64/cpuidle.c b/drivers/acpi/arm64/cpuidle.c
new file mode 100644
index 000000000000..801f9c450142
--- /dev/null
+++ b/drivers/acpi/arm64/cpuidle.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/psci.h>
+#include <acpi/processor.h>
+
+#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+
+static int psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized,
+ * idle states must not be enabled, so bail out.
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+ * Only bits[31:0] represent a PSCI power_state while
+ * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return psci_acpi_cpu_init_idle(cpu);
+}
+
+__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ u32 state = lpi->address;
+
+ if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter,
+ lpi->index, state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter,
+ lpi->index, state);
+}
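
The new cpuidle.c picks the idle-entry path from the _LPI arch_flags word: zero means a retention state (context preserved), anything else a powerdown state. A standalone sketch of the predicate on sample values (illustration only, compiled as userspace C):

#include <stdio.h>

#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))

int main(void)
{
	unsigned int samples[] = { 0x0, 0x1 };

	for (int i = 0; i < 2; i++)
		printf("arch_flags=%#x -> %s entry\n", samples[i],
		       ARM64_LPI_IS_RETENTION_STATE(samples[i]) ?
		       "retention" : "powerdown");
	return 0;
}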
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index 52b2abf88689..f30f138352b7 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -26,6 +26,11 @@ void acpi_arch_dma_setup(struct device *dev)
else
end = (1ULL << 32) - 1;
+ if (dev->dma_range_map) {
+ dev_dbg(dev, "dma_range_map already set\n");
+ return;
+ }
+
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
end = dma_range_map_max(map);
diff --git a/drivers/acpi/arm64/ffh.c b/drivers/acpi/arm64/ffh.c
new file mode 100644
index 000000000000..877edc6557e9
--- /dev/null
+++ b/drivers/acpi/arm64/ffh.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/slab.h>
+
+/*
+ * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as
+ * specified in https://developer.arm.com/docs/den0048/latest
+ */
+struct acpi_ffh_data {
+ struct acpi_ffh_info info;
+ void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *args,
+ struct arm_smccc_quirk *res);
+ void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+};
+
+int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
+{
+ enum arm_smccc_conduit conduit;
+ struct acpi_ffh_data *ffh_ctxt;
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return -EOPNOTSUPP;
+
+ conduit = arm_smccc_1_1_get_conduit();
+ if (conduit == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
+ if (!ffh_ctxt)
+ return -ENOMEM;
+
+ if (conduit == SMCCC_CONDUIT_SMC) {
+ ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
+ ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
+ } else {
+ ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc;
+ ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc;
+ }
+
+ memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info));
+
+ *region_ctxt = ffh_ctxt;
+ return AE_OK;
+}
+
+static bool acpi_ffh_smccc_owner_allowed(u32 fid)
+{
+ int owner = ARM_SMCCC_OWNER_NUM(fid);
+
+ if (owner == ARM_SMCCC_OWNER_STANDARD ||
+ owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM)
+ return true;
+
+ return false;
+}
+
+int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context)
+{
+ int ret = 0;
+ struct acpi_ffh_data *ffh_ctxt = region_context;
+
+ if (ffh_ctxt->info.offset == 0) {
+ /* SMC/HVC 32bit call */
+ struct arm_smccc_res res;
+ u32 a[8] = { 0 }, *ptr = (u32 *)value;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) ||
+ !acpi_ffh_smccc_owner_allowed(*ptr) ||
+ ffh_ctxt->info.length > 32) {
+ ret = AE_ERROR;
+ } else {
+ int idx, len = ffh_ctxt->info.length >> 2;
+
+ for (idx = 0; idx < len; idx++)
+ a[idx] = *(ptr + idx);
+
+ ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4],
+ a[5], a[6], a[7], &res, NULL);
+ memcpy(value, &res, sizeof(res));
+ }
+
+ } else if (ffh_ctxt->info.offset == 1) {
+ /* SMC/HVC 64bit call */
+ struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) ||
+ !acpi_ffh_smccc_owner_allowed(r->a0) ||
+ ffh_ctxt->info.length > sizeof(*r)) {
+ ret = AE_ERROR;
+ } else {
+ ffh_ctxt->invoke_ffh64_fn(r, r);
+ memcpy(value, r, ffh_ctxt->info.length);
+ }
+ } else {
+ ret = AE_ERROR;
+ }
+
+ return ret;
+}
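
acpi_ffh_address_space_arch_handler() only forwards fast calls owned by the Standard, SiP or OEM services. A self-contained sketch of the function-ID fields it tests, following the SMCCC layout (bit 31 = fast call, bit 30 = SMC64, bits 29:24 = owning entity); the PSCI_VERSION value below is just a sample input:

#include <stdint.h>
#include <stdio.h>

#define IS_FAST_CALL(fid)	((fid) & (1u << 31))
#define IS_64(fid)		((fid) & (1u << 30))
#define OWNER_NUM(fid)		(((fid) >> 24) & 0x3f)

int main(void)
{
	uint32_t fid = 0x84000000;	/* PSCI_VERSION: fast, 32-bit, owner 4 (Standard) */

	printf("fast=%d smc64=%d owner=%u\n",
	       !!IS_FAST_CALL(fid), !!IS_64(fid), OWNER_NUM(fid));
	return 0;
}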
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index c0e77c1c8e09..ffc867bac2d6 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -36,19 +36,25 @@ struct acpi_gtdt_descriptor {
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
-static inline __init void *next_platform_timer(void *platform_timer)
+static __init bool platform_timer_valid(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
- platform_timer += gh->length;
- if (platform_timer < acpi_gtdt_desc.gtdt_end)
- return platform_timer;
+ return (platform_timer >= (void *)(acpi_gtdt_desc.gtdt + 1) &&
+ platform_timer < acpi_gtdt_desc.gtdt_end &&
+ gh->length != 0 &&
+ platform_timer + gh->length <= acpi_gtdt_desc.gtdt_end);
+}
+
+static __init void *next_platform_timer(void *platform_timer)
+{
+ struct acpi_gtdt_header *gh = platform_timer;
- return NULL;
+ return platform_timer + gh->length;
}
#define for_each_platform_timer(_g) \
- for (_g = acpi_gtdt_desc.platform_timer; _g; \
+ for (_g = acpi_gtdt_desc.platform_timer; platform_timer_valid(_g);\
_g = next_platform_timer(_g))
static inline bool is_timer_block(void *platform_timer)
@@ -157,6 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
+ u32 cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -176,14 +183,22 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
return 0;
}
- platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
- if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) {
- pr_err(FW_BUG "invalid timer data.\n");
- return -EINVAL;
+ acpi_gtdt_desc.platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
+ for_each_platform_timer(platform_timer)
+ cnt++;
+
+ if (cnt != gtdt->platform_timer_count) {
+ cnt = min(cnt, gtdt->platform_timer_count);
+ pr_err(FW_BUG "limiting Platform Timer count to %d\n", cnt);
}
- acpi_gtdt_desc.platform_timer = platform_timer;
+
+ if (!cnt) {
+ acpi_gtdt_desc.platform_timer = NULL;
+ return 0;
+ }
+
if (platform_timer_count)
- *platform_timer_count = gtdt->platform_timer_count;
+ *platform_timer_count = cnt;
return 0;
}
@@ -283,45 +298,11 @@ error:
if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0;
- } while (i-- >= 0 && gtdt_frame--);
+ } while (i-- > 0 && gtdt_frame--);
return -EINVAL;
}
-/**
- * acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table.
- * @timer_mem: The pointer to the array of struct arch_timer_mem for returning
- * the result of parsing. The element number of this array should
- * be platform_timer_count(the total number of platform timers).
- * @timer_count: It points to a integer variable which is used for storing the
- * number of GT blocks we have parsed.
- *
- * Return: 0 if success, -EINVAL/-ENODEV if error.
- */
-int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem,
- int *timer_count)
-{
- int ret;
- void *platform_timer;
-
- *timer_count = 0;
- for_each_platform_timer(platform_timer) {
- if (is_timer_block(platform_timer)) {
- ret = gtdt_parse_timer_block(platform_timer, timer_mem);
- if (ret)
- return ret;
- timer_mem++;
- (*timer_count)++;
- }
- }
-
- if (*timer_count)
- pr_info("found %d memory-mapped timer block(s).\n",
- *timer_count);
-
- return 0;
-}
-
/*
* Initialize a SBSA generic Watchdog platform device info from GTDT
*/
@@ -352,7 +333,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
}
irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
- res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ res[2] = DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
@@ -373,11 +354,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
return 0;
}
-static int __init gtdt_sbsa_gwdt_init(void)
+static int __init gtdt_platform_timer_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
- int ret, timer_count, gwdt_count = 0;
+ int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0;
if (acpi_disabled)
return 0;
@@ -399,20 +380,41 @@ static int __init gtdt_sbsa_gwdt_init(void)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
+ ret = 0;
+
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
- break;
+ continue;
gwdt_count++;
+ } else if (is_timer_block(platform_timer)) {
+ struct arch_timer_mem atm = {};
+ struct platform_device *pdev;
+
+ ret = gtdt_parse_timer_block(platform_timer, &atm);
+ if (ret)
+ continue;
+
+ pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
+ mmio_timer_count, &atm,
+ sizeof(atm));
+ if (IS_ERR(pdev)) {
+ pr_err("Can't register timer %d\n", mmio_timer_count);
+ continue;
+ }
+
+ mmio_timer_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+ if (mmio_timer_count)
+ pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
-device_initcall(gtdt_sbsa_gwdt_init);
+device_initcall(gtdt_platform_timer_init);
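
The reworked GTDT parser validates every record header before dereferencing it, so a zero-length or truncated entry can no longer send the walk past the end of the table. A generic sketch of the bounded-walk pattern (hypothetical hdr/walk_records names):

struct hdr {
	u8 type;
	u16 length;
} __packed;

static void walk_records(void *start, void *end)
{
	void *p = start;

	while (p + sizeof(struct hdr) <= end) {
		struct hdr *h = p;

		/* a zero or overlong length means a malformed table */
		if (h->length == 0 || p + h->length > end)
			break;
		/* ... consume one record ... */
		p += h->length;
	}
}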
diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c
index d0c8aed90fd1..7a47d8095a7d 100644
--- a/drivers/acpi/arm64/init.c
+++ b/drivers/acpi/arm64/init.c
@@ -2,7 +2,7 @@
#include <linux/acpi.h>
#include "init.h"
-void __init acpi_arm_init(void)
+void __init acpi_arch_init(void)
{
if (IS_ENABLED(CONFIG_ACPI_AGDI))
acpi_agdi_init();
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c0b1c2c19444..65f0f56ad753 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -822,7 +822,7 @@ static struct iommu_iort_rmr_data *iort_rmr_alloc(
return NULL;
/* Create a copy of SIDs array to associate with this rmr_data */
- sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
+ sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
if (!sids_copy) {
kfree(rmr_data);
return NULL;
@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
new_sids = krealloc_array(sids, count + new_count,
sizeof(*new_sids), GFP_KERNEL);
- if (!new_sids)
+ if (!new_sids) {
+ kfree(sids);
return NULL;
+ }
for (i = count; i < total_count; i++)
new_sids[i] = id_start++;
@@ -1218,13 +1220,24 @@ static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}
+static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
+{
+ struct acpi_iort_memory_access *memory_access;
+ struct acpi_iort_root_complex *pci_rc;
+
+ pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+ memory_access =
+ (struct acpi_iort_memory_access *)&pci_rc->memory_properties;
+ return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
+}
+
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
u32 streamid)
{
- const struct iommu_ops *ops;
struct fwnode_handle *iort_fwnode;
- if (!node)
+ /* If there's no SMMU driver at all, give up now */
+ if (!node || !iort_iommu_driver_enabled(node->type))
return -ENODEV;
iort_fwnode = iort_get_fwnode(node);
@@ -1232,19 +1245,10 @@ static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
return -ENODEV;
/*
- * If the ops look-up fails, this means that either
- * the SMMU drivers have not been probed yet or that
- * the SMMU drivers are not built in the kernel;
- * Depending on whether the SMMU drivers are built-in
- * in the kernel or not, defer the IOMMU configuration
- * or just abort it.
+ * If the SMMU drivers are enabled but not loaded/probed
+ * yet, this will defer.
*/
- ops = iommu_ops_from_fwnode(iort_fwnode);
- if (!ops)
- return iort_iommu_driver_enabled(node->type) ?
- -EPROBE_DEFER : -ENODEV;
-
- return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
+ return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode);
}
struct iort_pci_alias_info {
@@ -1344,6 +1348,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && iort_pci_rc_supports_ats(node))
fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ if (fwspec && iort_pci_rc_supports_canwbs(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
} else {
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
@@ -1712,6 +1718,15 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip09 Platform */
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
+ {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP10C ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP11 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{ }
};
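
The iort_rmr_alloc() hunk swaps kmemdup(sids, num_sids * sizeof(*sids), ...) for kmemdup_array(), which fails safely (returns NULL) if the count * size multiplication would overflow, instead of allocating a short buffer. A minimal usage sketch with a hypothetical wrapper:

static u32 *dup_sids(const u32 *sids, size_t num_sids)
{
	return kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
}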
diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c
new file mode 100644
index 000000000000..84963a20c3e7
--- /dev/null
+++ b/drivers/acpi/arm64/mpam.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Arm Ltd.
+
+/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */
+
+#define pr_fmt(fmt) "ACPI MPAM: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_mpam.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/platform_device.h>
+
+#include <acpi/processor.h>
+
+/*
+ * Flags for acpi_table_mpam_msc.*_interrupt_flags.
+ * See 2.1.1 Interrupt Flags, Table 5, of DEN0065B_MPAM_ACPI_3.0-bet.
+ */
+#define ACPI_MPAM_MSC_IRQ_MODE BIT(0)
+#define ACPI_MPAM_MSC_IRQ_TYPE_MASK GENMASK(2, 1)
+#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK BIT(3)
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR 0
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER 1
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID BIT(4)
+
+/*
+ * Encodings for the MSC node body interface type field.
+ * See 2.1 MPAM MSC node, Table 4 of DEN0065B_MPAM_ACPI_3.0-bet.
+ */
+#define ACPI_MPAM_MSC_IFACE_MMIO 0x00
+#define ACPI_MPAM_MSC_IFACE_PCC 0x0a
+
+static bool _is_ppi_partition(u32 flags)
+{
+ u32 aff_type, is_ppi;
+ bool ret;
+
+ is_ppi = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_VALID, flags);
+ if (!is_ppi)
+ return false;
+
+ aff_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK, flags);
+ ret = (aff_type == ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER);
+ if (ret)
+ pr_err_once("Partitioned interrupts not supported\n");
+
+ return ret;
+}
+
+static int acpi_mpam_register_irq(struct platform_device *pdev,
+ u32 intid, u32 flags)
+{
+ int irq;
+ u32 int_type;
+ int trigger;
+
+ if (!intid)
+ return -EINVAL;
+
+ if (_is_ppi_partition(flags))
+ return -EINVAL;
+
+ trigger = FIELD_GET(ACPI_MPAM_MSC_IRQ_MODE, flags);
+ int_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_TYPE_MASK, flags);
+ if (int_type != ACPI_MPAM_MSC_IRQ_TYPE_WIRED)
+ return -EINVAL;
+
+ irq = acpi_register_gsi(&pdev->dev, intid, trigger, ACPI_ACTIVE_HIGH);
+ if (irq < 0)
+ pr_err_once("Failed to register interrupt 0x%x with ACPI\n", intid);
+
+ return irq;
+}
+
+static void acpi_mpam_parse_irqs(struct platform_device *pdev,
+ struct acpi_mpam_msc_node *tbl_msc,
+ struct resource *res, int *res_idx)
+{
+ u32 flags, intid;
+ int irq;
+
+ intid = tbl_msc->overflow_interrupt;
+ flags = tbl_msc->overflow_interrupt_flags;
+ irq = acpi_mpam_register_irq(pdev, intid, flags);
+ if (irq > 0)
+ res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "overflow");
+
+ intid = tbl_msc->error_interrupt;
+ flags = tbl_msc->error_interrupt_flags;
+ irq = acpi_mpam_register_irq(pdev, intid, flags);
+ if (irq > 0)
+ res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "error");
+}
+
+static int acpi_mpam_parse_resource(struct mpam_msc *msc,
+ struct acpi_mpam_resource_node *res)
+{
+ int level, nid;
+ u32 cache_id;
+
+ switch (res->locator_type) {
+ case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE:
+ cache_id = res->locator.cache_locator.cache_reference;
+ level = find_acpi_cache_level_from_id(cache_id);
+ if (level <= 0) {
+ pr_err_once("Bad level (%d) for cache with id %u\n", level, cache_id);
+ return -EINVAL;
+ }
+ return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
+ level, cache_id);
+ case ACPI_MPAM_LOCATION_TYPE_MEMORY:
+ nid = pxm_to_node(res->locator.memory_locator.proximity_domain);
+ if (nid == NUMA_NO_NODE) {
+ pr_debug("Bad proximity domain %lld, using node 0 instead\n",
+ res->locator.memory_locator.proximity_domain);
+ nid = 0;
+ }
+ return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY,
+ MPAM_CLASS_ID_DEFAULT, nid);
+ default:
+ /* These get discovered later and are treated as unknown */
+ return 0;
+ }
+}
+
+int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc)
+{
+ int i, err;
+ char *ptr, *table_end;
+ struct acpi_mpam_resource_node *resource;
+
+ table_end = (char *)tbl_msc + tbl_msc->length;
+ ptr = (char *)(tbl_msc + 1);
+ for (i = 0; i < tbl_msc->num_resource_nodes; i++) {
+ u64 max_deps, remaining_table;
+
+ if (ptr + sizeof(*resource) > table_end)
+ return -EINVAL;
+
+ resource = (struct acpi_mpam_resource_node *)ptr;
+
+ remaining_table = table_end - ptr;
+ max_deps = remaining_table / sizeof(struct acpi_mpam_func_deps);
+ if (resource->num_functional_deps > max_deps) {
+ pr_debug("MSC has impossible number of functional dependencies\n");
+ return -EINVAL;
+ }
+
+ err = acpi_mpam_parse_resource(msc, resource);
+ if (err)
+ return err;
+
+ ptr += sizeof(*resource);
+ ptr += resource->num_functional_deps * sizeof(struct acpi_mpam_func_deps);
+ }
+
+ return 0;
+}
+
+/*
+ * Creates the device power management link and returns true if the
+ * acpi id is valid and usable for cpu affinity. This is the case
+ * when the linked device is a processor or a processor container.
+ */
+static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc,
+ struct platform_device *pdev,
+ u32 *acpi_id)
+{
+ char hid[sizeof(tbl_msc->hardware_id_linked_device) + 1] = { 0 };
+ bool acpi_id_valid = false;
+ struct acpi_device *buddy;
+ char uid[11];
+ int len;
+
+ memcpy(hid, &tbl_msc->hardware_id_linked_device,
+ sizeof(tbl_msc->hardware_id_linked_device));
+
+ if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) {
+ *acpi_id = tbl_msc->instance_id_linked_device;
+ acpi_id_valid = true;
+ }
+
+ len = snprintf(uid, sizeof(uid), "%u",
+ tbl_msc->instance_id_linked_device);
+ if (len >= sizeof(uid)) {
+ pr_debug("Failed to convert uid of device for power management.");
+ return acpi_id_valid;
+ }
+
+ buddy = acpi_dev_get_first_match_dev(hid, uid, -1);
+ if (buddy) {
+ device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS);
+ acpi_dev_put(buddy);
+ }
+
+ return acpi_id_valid;
+}
+
+static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc,
+ enum mpam_msc_iface *iface)
+{
+ switch (tbl_msc->interface_type) {
+ case ACPI_MPAM_MSC_IFACE_MMIO:
+ *iface = MPAM_IFACE_MMIO;
+ return 0;
+ case ACPI_MPAM_MSC_IFACE_PCC:
+ *iface = MPAM_IFACE_PCC;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct platform_device * __init acpi_mpam_parse_msc(struct acpi_mpam_msc_node *tbl_msc)
+{
+ struct platform_device *pdev __free(platform_device_put) =
+ platform_device_alloc("mpam_msc", tbl_msc->identifier);
+ int next_res = 0, next_prop = 0, err;
+ /* pcc, nrdy, affinity and a sentinel */
+ struct property_entry props[4] = { 0 };
+ /* mmio, 2xirq, no sentinel. */
+ struct resource res[3] = { 0 };
+ struct acpi_device *companion;
+ enum mpam_msc_iface iface;
+ char uid[16];
+ u32 acpi_id;
+
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
+
+ /* Some power management is described in the namespace: */
+ err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier);
+ if (err > 0 && err < sizeof(uid)) {
+ companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1);
+ if (companion) {
+ ACPI_COMPANION_SET(&pdev->dev, companion);
+ acpi_dev_put(companion);
+ } else {
+ pr_debug("MSC.%u: missing namespace entry\n", tbl_msc->identifier);
+ }
+ }
+
+ if (decode_interface_type(tbl_msc, &iface)) {
+ pr_debug("MSC.%u: unknown interface type\n", tbl_msc->identifier);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (iface == MPAM_IFACE_MMIO) {
+ res[next_res++] = DEFINE_RES_MEM_NAMED(tbl_msc->base_address,
+ tbl_msc->mmio_size,
+ "MPAM:MSC");
+ } else if (iface == MPAM_IFACE_PCC) {
+ props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel",
+ tbl_msc->base_address);
+ }
+
+ acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res);
+
+ WARN_ON_ONCE(next_res > ARRAY_SIZE(res));
+ err = platform_device_add_resources(pdev, res, next_res);
+ if (err)
+ return ERR_PTR(err);
+
+ props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us",
+ tbl_msc->max_nrdy_usec);
+
+ /*
+ * The MSC's CPU affinity is described via its linked power
+ * management device, but only if it points at a Processor or
+ * Processor Container.
+ */
+ if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id))
+ props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", acpi_id);
+
+ WARN_ON_ONCE(next_prop > ARRAY_SIZE(props) - 1);
+ err = device_create_managed_software_node(&pdev->dev, props, NULL);
+ if (err)
+ return ERR_PTR(err);
+
+ /*
+ * Stash the table entry for acpi_mpam_parse_resources() to discover
+ * what this MSC controls.
+ */
+ err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length);
+ if (err)
+ return ERR_PTR(err);
+
+ err = platform_device_add(pdev);
+ if (err)
+ return ERR_PTR(err);
+
+ return_ptr(pdev);
+}
+
+static int __init acpi_mpam_parse(void)
+{
+ char *table_end, *table_offset;
+ struct acpi_mpam_msc_node *tbl_msc;
+ struct platform_device *pdev;
+
+ if (acpi_disabled || !system_supports_mpam())
+ return 0;
+
+ struct acpi_table_header *table __free(acpi_put_table) =
+ acpi_get_table_pointer(ACPI_SIG_MPAM, 0);
+
+ if (IS_ERR(table))
+ return 0;
+
+ if (table->revision < 1) {
+ pr_debug("MPAM ACPI table revision %d not supported\n", table->revision);
+ return 0;
+ }
+
+ table_offset = (char *)(table + 1);
+ table_end = (char *)table + table->length;
+
+ while (table_offset < table_end) {
+ tbl_msc = (struct acpi_mpam_msc_node *)table_offset;
+ if (table_offset + sizeof(*tbl_msc) > table_end ||
+ table_offset + tbl_msc->length > table_end) {
+ pr_err("MSC entry overlaps end of ACPI table\n");
+ return -EINVAL;
+ }
+ table_offset += tbl_msc->length;
+
+ /*
+ * If any of the reserved fields are set, make no attempt to
+ * parse the MSC structure. This MSC will still be counted by
+ * acpi_mpam_count_msc(), meaning the MPAM driver can't probe
+ * against all MSC, and will never be enabled. There is no way
+ * to enable it safely, because we cannot determine safe
+ * system-wide partid and pmg ranges in this situation.
+ */
+ if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) {
+ pr_err_once("Unrecognised MSC, MPAM not usable\n");
+ pr_debug("MSC.%u: reserved field set\n", tbl_msc->identifier);
+ continue;
+ }
+
+ if (!tbl_msc->mmio_size) {
+ pr_debug("MSC.%u: marked as disabled\n", tbl_msc->identifier);
+ continue;
+ }
+
+ pdev = acpi_mpam_parse_msc(tbl_msc);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
+/**
+ * acpi_mpam_count_msc() - Count the number of MSC described by firmware.
+ *
+ * Returns the number of MSCs, zero if MPAM is not usable, or a negative
+ * error code if the table is malformed.
+ *
+ * This can be called before or in parallel with acpi_mpam_parse().
+ */
+int acpi_mpam_count_msc(void)
+{
+ char *table_end, *table_offset;
+ struct acpi_mpam_msc_node *tbl_msc;
+ int count = 0;
+
+ if (acpi_disabled || !system_supports_mpam())
+ return 0;
+
+ struct acpi_table_header *table __free(acpi_put_table) =
+ acpi_get_table_pointer(ACPI_SIG_MPAM, 0);
+
+ if (IS_ERR(table))
+ return 0;
+
+ if (table->revision < 1)
+ return 0;
+
+ table_offset = (char *)(table + 1);
+ table_end = (char *)table + table->length;
+
+ while (table_offset < table_end) {
+ tbl_msc = (struct acpi_mpam_msc_node *)table_offset;
+
+ if (table_offset + sizeof(*tbl_msc) > table_end)
+ return -EINVAL;
+ if (tbl_msc->length < sizeof(*tbl_msc))
+ return -EINVAL;
+ if (tbl_msc->length > table_end - table_offset)
+ return -EINVAL;
+ table_offset += tbl_msc->length;
+
+ if (!tbl_msc->mmio_size)
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Call after ACPI devices have been created, which happens behind acpi_scan_init()
+ * called from subsys_initcall(). PCC requires the mailbox driver, which is
+ * initialised from postcore_initcall().
+ */
+subsys_initcall_sync(acpi_mpam_parse);
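
acpi_mpam_register_irq() decodes the per-interrupt flags word with FIELD_GET() over the masks defined at the top of the file. A sketch of the decode on a hypothetical flags value of 0x1 (bit 0 set, everything else clear):

	u32 flags = 0x1;
	u32 mode  = FIELD_GET(ACPI_MPAM_MSC_IRQ_MODE, flags);		/* 1, passed through as the GSI trigger */
	u32 type  = FIELD_GET(ACPI_MPAM_MSC_IRQ_TYPE_MASK, flags);	/* 0 = wired */
	u32 valid = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_VALID, flags);	/* 0, so no PPI partition check */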
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b379401ff1c2..34181fa52e93 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) "ACPI: battery: " fmt
-#include <linux/async.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/jiffies.h>
@@ -22,7 +21,7 @@
#include <linux/suspend.h>
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/power_supply.h>
@@ -38,9 +37,10 @@
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
-#define ACPI_BATTERY_STATE_DISCHARGING 0x1
-#define ACPI_BATTERY_STATE_CHARGING 0x2
-#define ACPI_BATTERY_STATE_CRITICAL 0x4
+#define ACPI_BATTERY_STATE_DISCHARGING 0x1
+#define ACPI_BATTERY_STATE_CHARGING 0x2
+#define ACPI_BATTERY_STATE_CRITICAL 0x4
+#define ACPI_BATTERY_STATE_CHARGE_LIMITING 0x8
#define MAX_STRING_LENGTH 64
@@ -49,8 +49,6 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
-static async_cookie_t async_cookie;
-static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_ac_is_broken;
@@ -93,8 +91,7 @@ enum {
};
struct acpi_battery {
- struct mutex lock;
- struct mutex sysfs_lock;
+ struct mutex update_lock;
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_device *device;
@@ -155,7 +152,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery);
static int acpi_battery_is_charged(struct acpi_battery *battery)
{
- /* charging, discharging or critical low */
+ /* charging, discharging, critical low or charge limited */
if (battery->state != 0)
return 0;
@@ -215,6 +212,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = acpi_battery_handle_discharging(battery);
else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (battery->state & ACPI_BATTERY_STATE_CHARGE_LIMITING)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
else
@@ -279,8 +278,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
- val->intval = battery->capacity_now * 100/
- full_capacity;
+ val->intval = DIV_ROUND_CLOSEST_ULL(battery->capacity_now * 100ULL,
+ full_capacity);
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -308,7 +307,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
return ret;
}
-static enum power_supply_property charge_battery_props[] = {
+static const enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -326,7 +325,7 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+static const enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -340,7 +339,7 @@ static enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-static enum power_supply_property energy_battery_props[] = {
+static const enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -358,7 +357,7 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-static enum power_supply_property energy_battery_full_cap_broken_props[] = {
+static const enum power_supply_property energy_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -535,11 +534,9 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status = AE_ERROR;
- mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle,
use_bix ? "_BIX":"_BIF",
NULL, &buffer);
- mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
@@ -576,11 +573,8 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
msecs_to_jiffies(cache_time)))
return 0;
- mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, "_BST",
NULL, &buffer);
- mutex_unlock(&battery->lock);
-
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
"_BST evaluation failed: %s",
@@ -628,11 +622,8 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
!test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
return -ENODEV;
- mutex_lock(&battery->lock);
status = acpi_execute_simple_method(battery->device->handle, "_BTP",
battery->alarm);
- mutex_unlock(&battery->lock);
-
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -661,7 +652,7 @@ static ssize_t acpi_battery_alarm_show(struct device *dev,
{
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- return sprintf(buf, "%d\n", battery->alarm * 1000);
+ return sysfs_emit(buf, "%d\n", battery->alarm * 1000);
}
static ssize_t acpi_battery_alarm_store(struct device *dev,
@@ -678,12 +669,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static const struct device_attribute alarm_attr = {
+static struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
+static struct attribute *acpi_battery_attrs[] = {
+ &alarm_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(acpi_battery);
+
/*
* The Battery Hooking API
*
@@ -697,28 +694,35 @@ static LIST_HEAD(acpi_battery_list);
static LIST_HEAD(battery_hook_list);
static DEFINE_MUTEX(hook_mutex);
-static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
+static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
{
struct acpi_battery *battery;
+
/*
* In order to remove a hook, we first need to
* de-register all the batteries that are registered.
*/
- if (lock)
- mutex_lock(&hook_mutex);
list_for_each_entry(battery, &acpi_battery_list, list) {
if (!hook->remove_battery(battery->bat, hook))
power_supply_changed(battery->bat);
}
- list_del(&hook->list);
- if (lock)
- mutex_unlock(&hook_mutex);
- pr_info("extension unregistered: %s\n", hook->name);
+ list_del_init(&hook->list);
+
+ pr_info("hook unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
{
- __battery_hook_unregister(hook, 1);
+ mutex_lock(&hook_mutex);
+ /*
+ * Ignore already unregistered battery hooks. This might happen
+ * if a battery hook was previously unloaded due to an error when
+ * adding a new battery.
+ */
+ if (!list_empty(&hook->list))
+ battery_hook_unregister_unlocked(hook);
+
+ mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_unregister);
@@ -727,7 +731,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
struct acpi_battery *battery;
mutex_lock(&hook_mutex);
- INIT_LIST_HEAD(&hook->list);
list_add(&hook->list, &battery_hook_list);
/*
* Now that the driver is registered, we need
@@ -739,23 +742,38 @@ void battery_hook_register(struct acpi_battery_hook *hook)
if (hook->add_battery(battery->bat, hook)) {
/*
* If add_battery() returns non-zero,
- * the registration of the extension has failed,
+ * the registration of the hook has failed,
* and we will not add it to the list of loaded
* hooks.
*/
- pr_err("extension failed to load: %s", hook->name);
- __battery_hook_unregister(hook, 0);
+ pr_err("hook failed to load: %s", hook->name);
+ battery_hook_unregister_unlocked(hook);
goto end;
}
power_supply_changed(battery->bat);
}
- pr_info("new extension: %s\n", hook->name);
+ pr_info("new hook: %s\n", hook->name);
end:
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_register);
+static void devm_battery_hook_unregister(void *data)
+{
+ struct acpi_battery_hook *hook = data;
+
+ battery_hook_unregister(hook);
+}
+
+int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook)
+{
+ battery_hook_register(hook);
+
+ return devm_add_action_or_reset(dev, devm_battery_hook_unregister, hook);
+}
+EXPORT_SYMBOL_GPL(devm_battery_hook_register);
+
/*
* This function gets called right after the battery sysfs
* attributes have been added, so that the drivers that
@@ -778,12 +796,12 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat, hook_node)) {
/*
- * The notification of the extensions has failed, to
- * prevent further errors we will unload the extension.
+ * Notifying the hook has failed; unload the hook to
+ * prevent further errors.
*/
- pr_err("error in extension, unloading: %s",
+ pr_err("error in hook, unloading: %s",
hook_node->name);
- __battery_hook_unregister(hook_node, 0);
+ battery_hook_unregister_unlocked(hook_node);
}
}
mutex_unlock(&hook_mutex);
@@ -816,14 +834,18 @@ static void __exit battery_hook_exit(void)
* need to remove the hooks.
*/
list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
- __battery_hook_unregister(hook, 1);
+ battery_hook_unregister(hook);
}
mutex_destroy(&hook_mutex);
}
static int sysfs_add_battery(struct acpi_battery *battery)
{
- struct power_supply_config psy_cfg = { .drv_data = battery, };
+ struct power_supply_config psy_cfg = {
+ .drv_data = battery,
+ .attr_grp = acpi_battery_groups,
+ .no_wakeup_source = true,
+ };
bool full_cap_broken = false;
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
@@ -858,7 +880,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
battery->bat_desc.get_property = acpi_battery_get_property;
- battery->bat = power_supply_register_no_ws(&battery->device->dev,
+ battery->bat = power_supply_register(&battery->device->dev,
&battery->bat_desc, &psy_cfg);
if (IS_ERR(battery->bat)) {
@@ -868,21 +890,17 @@ static int sysfs_add_battery(struct acpi_battery *battery)
return result;
}
battery_hook_add_battery(battery);
- return device_create_file(&battery->bat->dev, &alarm_attr);
+ return 0;
}
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- mutex_lock(&battery->sysfs_lock);
- if (!battery->bat) {
- mutex_unlock(&battery->sysfs_lock);
+ if (!battery->bat)
return;
- }
+
battery_hook_remove_battery(battery);
- device_remove_file(&battery->bat->dev, &alarm_attr);
power_supply_unregister(battery->bat);
battery->bat = NULL;
- mutex_unlock(&battery->sysfs_lock);
}
static void find_battery(const struct dmi_header *dm, void *private)
@@ -1042,6 +1060,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
if (!battery)
return;
+
+ guard(mutex)(&battery->update_lock);
+
old = battery->bat;
/*
* On Acer Aspire V5-573G notifications are sometimes triggered too
@@ -1064,21 +1085,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
}
static int battery_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+ unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
- int result;
- switch (mode) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
+ if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
+ guard(mutex)(&battery->update_lock);
+
if (!acpi_battery_present(battery))
return 0;
if (battery->bat) {
acpi_battery_refresh(battery);
} else {
+ int result;
+
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1090,7 +1112,6 @@ static int battery_notify(struct notifier_block *nb,
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
- break;
}
return 0;
@@ -1168,6 +1189,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
{
int retry, ret;
+ guard(mutex)(&battery->update_lock);
+
for (retry = 5; retry; retry--) {
ret = acpi_battery_update(battery, false);
if (!ret)
@@ -1178,10 +1201,17 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
return ret;
}
+static void sysfs_battery_cleanup(struct acpi_battery *battery)
+{
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
- struct acpi_battery *battery = NULL;
+ struct acpi_battery *battery;
if (!device)
return -EINVAL;
@@ -1189,15 +1219,18 @@ static int acpi_battery_add(struct acpi_device *device)
if (device->dep_unmet)
return -EPROBE_DEFER;
- battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
+ battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL);
if (!battery)
return -ENOMEM;
battery->device = device;
- strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
+ strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
- mutex_init(&battery->lock);
- mutex_init(&battery->sysfs_lock);
+
+ result = devm_mutex_init(&device->dev, &battery->update_lock);
+ if (result)
+ return result;
+
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
@@ -1209,7 +1242,9 @@ static int acpi_battery_add(struct acpi_device *device)
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
- register_pm_notifier(&battery->pm_nb);
+ result = register_pm_notifier(&battery->pm_nb);
+ if (result)
+ goto fail;
device_init_wakeup(&device->dev, 1);
@@ -1224,17 +1259,14 @@ fail_pm:
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
fail:
- sysfs_remove_battery(battery);
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
+ sysfs_battery_cleanup(battery);
return result;
}
static void acpi_battery_remove(struct acpi_device *device)
{
- struct acpi_battery *battery = NULL;
+ struct acpi_battery *battery;
if (!device || !acpi_driver_data(device))
return;
@@ -1246,14 +1278,12 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
- sysfs_remove_battery(battery);
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
}
-#ifdef CONFIG_PM_SLEEP
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct device *dev)
{
@@ -1267,14 +1297,14 @@ static int acpi_battery_resume(struct device *dev)
return -EINVAL;
battery->update_time = 0;
+
+ guard(mutex)(&battery->update_lock);
+
acpi_battery_update(battery, true);
return 0;
}
-#else
-#define acpi_battery_resume NULL
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
@@ -1284,38 +1314,24 @@ static struct acpi_driver acpi_battery_driver = {
.add = acpi_battery_add,
.remove = acpi_battery_remove,
},
- .drv.pm = &acpi_battery_pm,
+ .drv.pm = pm_sleep_ptr(&acpi_battery_pm),
+ .drv.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
-static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
-{
- int result;
-
- if (acpi_quirk_skip_acpi_ac_and_battery())
- return;
-
- dmi_check_system(bat_dmi_table);
-
- result = acpi_bus_register_driver(&acpi_battery_driver);
- battery_driver_registered = (result == 0);
-}
-
static int __init acpi_battery_init(void)
{
- if (acpi_disabled)
+ if (acpi_disabled || acpi_quirk_skip_acpi_ac_and_battery())
return -ENODEV;
- async_cookie = async_schedule(acpi_battery_init_async, NULL);
- return 0;
+ dmi_check_system(bat_dmi_table);
+
+ return acpi_bus_register_driver(&acpi_battery_driver);
}
static void __exit acpi_battery_exit(void)
{
- async_synchronize_cookie(async_cookie + 1);
- if (battery_driver_registered) {
- acpi_bus_unregister_driver(&acpi_battery_driver);
- battery_hook_exit();
- }
+ acpi_bus_unregister_driver(&acpi_battery_driver);
+ battery_hook_exit();
}
module_init(acpi_battery_init);
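
The new devm_battery_hook_register() ties a hook's lifetime to a device, so consumers no longer need an explicit unregister path. A hypothetical consumer sketch (the examplehw names and no-op callbacks are invented for illustration):

static int examplehw_add_battery(struct power_supply *battery,
				 struct acpi_battery_hook *hook)
{
	/* attach extra attributes or state to the battery here */
	return 0;
}

static int examplehw_remove_battery(struct power_supply *battery,
				    struct acpi_battery_hook *hook)
{
	return 0;
}

static struct acpi_battery_hook examplehw_hook = {
	.name		= "examplehw battery hook",
	.add_battery	= examplehw_add_battery,
	.remove_battery	= examplehw_remove_battery,
};

static int examplehw_probe(struct platform_device *pdev)
{
	/* unregisters automatically when the device goes away */
	return devm_battery_hook_register(&pdev->dev, &examplehw_hook);
}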
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index e4fb9e225ddf..0fdd581ef96f 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -29,14 +29,7 @@ BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
-static ssize_t image_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
-{
- memcpy(buf, attr->private + off, count);
- return count;
-}
-
-static BIN_ATTR_RO(image, 0); /* size gets filled in later */
+static __ro_after_init BIN_ATTR_SIMPLE_RO(image);
static struct attribute *bgrt_attributes[] = {
&bgrt_attr_version.attr,
@@ -47,7 +40,7 @@ static struct attribute *bgrt_attributes[] = {
NULL,
};
-static struct bin_attribute *bgrt_bin_attributes[] = {
+static const struct bin_attribute *const bgrt_bin_attributes[] = {
&bin_attr_image,
NULL,
};
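
BIN_ATTR_SIMPLE_RO() drops the hand-rolled image_read(): the sysfs core's simple read helper copies straight out of the attribute's ->private buffer. All the driver's init path has to do (outside this hunk) is point the attribute at the image, roughly:

	bin_attr_image.private = image;		/* start of the BGRT image */
	bin_attr_image.size = bgrt_image_size;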
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 787eca838410..a984ccd4a2a0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -329,6 +329,8 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_THERMAL))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_BATTERY))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
@@ -1045,10 +1047,10 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
ACPI Bus operations
-------------------------------------------------------------------------- */
-static int acpi_bus_match(struct device *dev, struct device_driver *drv)
+static int acpi_bus_match(struct device *dev, const struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = to_acpi_driver(drv);
+ const struct acpi_driver *acpi_drv = to_acpi_driver(drv);
return acpi_dev->flags.match_driver
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
@@ -1201,6 +1203,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_LPIC:
message = "LPIC";
break;
+ case ACPI_IRQ_MODEL_RINTC:
+ message = "RINTC";
+ break;
default:
pr_info("Unknown interrupt routing model\n");
return -ENODEV;
@@ -1401,7 +1406,7 @@ static int __init acpi_bus_init(void)
goto error1;
/*
- * Register the for all standard device notifications.
+ * Register for all standard device notifications.
*/
status =
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
@@ -1429,6 +1434,8 @@ static int __init acpi_bus_init(void)
struct kobject *acpi_kobj;
EXPORT_SYMBOL_GPL(acpi_kobj);
+void __weak __init acpi_arch_init(void) { }
+
static int __init acpi_init(void)
{
int result;
@@ -1439,8 +1446,10 @@ static int __init acpi_init(void)
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
- if (!acpi_kobj)
- pr_debug("%s: kset create error\n", __func__);
+ if (!acpi_kobj) {
+ pr_err("Failed to register kobject\n");
+ return -ENOMEM;
+ }
init_prmt();
acpi_init_pcc();
@@ -1456,7 +1465,7 @@ static int __init acpi_init(void)
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
- acpi_arm_init();
+ acpi_arch_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
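
acpi_init() now reaches architecture code through acpi_arch_init(), with the __weak no-op default added above, so any architecture can override the hook without #ifdefs. The pattern, as the arm64 hunk in this series implements it:

/* generic default (drivers/acpi/bus.c) */
void __weak __init acpi_arch_init(void) { }

/* architecture override (drivers/acpi/arm64/init.c) */
void __init acpi_arch_init(void)
{
	if (IS_ENABLED(CONFIG_ACPI_AGDI))
		acpi_agdi_init();
	/* ... remaining arm64 sub-initialisers ... */
}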
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index cc61020756be..3c6dd9b4ba0a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -24,6 +24,7 @@
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
+#define ACPI_BUTTON_NOTIFY_WAKE 0x02
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
#define ACPI_BUTTON_SUBCLASS_POWER "power"
@@ -130,6 +131,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+ * Samsung Galaxy Book2: the initial _LID device notification
+ * returns lid closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
@@ -432,7 +444,12 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
struct input_dev *input;
int keycode;
- if (event != ACPI_BUTTON_NOTIFY_STATUS) {
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_STATUS:
+ break;
+ case ACPI_BUTTON_NOTIFY_WAKE:
+ break;
+ default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
return;
@@ -441,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
acpi_pm_wakeup_event(&device->dev);
button = acpi_driver_data(device);
- if (button->suspended)
+ if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE)
return;
input = button->input;
@@ -547,20 +564,20 @@ static int acpi_button_add(struct acpi_device *device)
!strcmp(hid, ACPI_BUTTON_HID_POWERF)) {
button->type = ACPI_BUTTON_TYPE_POWER;
handler = acpi_button_notify;
- strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER);
+ strscpy(name, ACPI_BUTTON_DEVICE_NAME_POWER, MAX_ACPI_DEVICE_NAME_LEN);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER);
} else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) ||
!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) {
button->type = ACPI_BUTTON_TYPE_SLEEP;
handler = acpi_button_notify;
- strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP);
+ strscpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP, MAX_ACPI_DEVICE_NAME_LEN);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP);
} else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) {
button->type = ACPI_BUTTON_TYPE_LID;
handler = acpi_lid_notify;
- strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
+ strscpy(name, ACPI_BUTTON_DEVICE_NAME_LID, MAX_ACPI_DEVICE_NAME_LEN);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
input->open = acpi_lid_input_open;
@@ -602,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
- if (error)
+ if (error) {
+ input_free_device(input);
goto err_remove_fs;
+ }
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
@@ -618,7 +637,7 @@ static int acpi_button_add(struct acpi_device *device)
break;
default:
status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY, handler,
+ ACPI_ALL_NOTIFY, handler,
device);
break;
}
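
The handler-type change matters because ACPI_BUTTON_NOTIFY_WAKE is 0x02: in ACPICA terms, ACPI_SYSTEM_NOTIFY covers events 0x00-0x7f and ACPI_DEVICE_NOTIFY events 0x80 and up, so a handler installed for ACPI_DEVICE_NOTIFY alone would never see the wake notification. Subscribing to both ranges:

	status = acpi_install_notify_handler(device->handle,
					     ACPI_ALL_NOTIFY,	/* system + device ranges */
					     handler, device);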
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1d857978f5f4..3bdeeee3414e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -41,13 +41,12 @@
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
struct pcc_mbox_chan *pcc_channel;
- void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -95,7 +94,7 @@ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \
0x8 + (offs))
/* Check if a CPC register is in PCC */
@@ -103,6 +102,11 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
+/* Check if a CPC register is in FFH */
+#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
+ (cpc)->cpc_entry.reg.space_id == \
+ ACPI_ADR_SPACE_FIXED_HARDWARE)
+
/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
@@ -124,6 +128,20 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
+
+/*
+ * Each bit indicates the optionality of the register in per-cpu
+ * cpc_regs[] with the corresponding index. 0 means mandatory and 1
+ * means optional.
+ */
+#define REG_OPTIONAL (0x1FC7D0)
+
+/*
+ * Use the index of the register in per-cpu cpc_regs[] to check if
+ * it's an optional one.
+ */
+#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx)))
+
/*
* Arbitrary Retries in case the remote processor is slow to respond
* to PCC commands. Keeping it high enough to cover emulators where
@@ -160,6 +178,7 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
+show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
@@ -170,8 +189,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
GENMASK(((reg)->bit_width) - 1, 0))
+#define MASK_VAL_WRITE(reg, prev_val, val) \
+ ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
+ ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
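
The MASK_VAL_READ/MASK_VAL_WRITE split turns sub-register stores into a read-modify-write, serialised by the new per-CPU rmw_lock so concurrent writers to neighbouring fields of the same word are not lost. A worked example with a hypothetical layout (bit_offset = 8, bit_width = 8, previous word 0xAABBCCDD, new field value 0x11):

	field mask	= GENMASK(7, 0) << 8		= 0x0000FF00
	kept bits	= 0xAABBCCDD & ~0x0000FF00	= 0xAABB00DD
	inserted field	= (0x11 & 0xFF) << 8		= 0x00001100
	value written	= 0xAABB00DD | 0x00001100	= 0xAABB11DD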
@@ -196,6 +218,7 @@ static struct attribute *cppc_attrs[] = {
&highest_perf.attr,
&lowest_perf.attr,
&lowest_nonlinear_perf.attr,
+ &guaranteed_perf.attr,
&nominal_perf.attr,
&nominal_freq.attr,
&lowest_freq.attr,
@@ -213,7 +236,7 @@ static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
int ret, status;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
if (!pcc_ss_data->platform_owns_pcc)
return 0;
@@ -248,7 +271,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
unsigned int time_delta;
/*
@@ -437,7 +460,7 @@ bool acpi_cpc_valid(void)
if (acpi_disabled)
return false;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
@@ -453,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -561,15 +584,6 @@ static int register_pcc_channel(int pcc_ss_idx)
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
- pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- pcc_ss_idx);
- return -ENOMEM;
- }
-
/* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -661,10 +675,6 @@ static int pcc_data_alloc(int pcc_ss_id)
* )
*/
-#ifndef arch_init_invariance_cppc
-static inline void arch_init_invariance_cppc(void) { }
-#endif
-
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
* @pr: Ptr to acpi_processor containing this CPU's logical ID.
@@ -740,7 +750,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
/*
- * Disregard _CPC if the number of entries in the return pachage is not
+ * Disregard _CPC if the number of entries in the return package is not
* as expected, but support future revisions being proper supersets of
* the v3 and only causing more entries to be returned by _CPC.
*/
@@ -857,6 +867,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
+ raw_spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@@ -894,8 +905,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- arch_init_invariance_cppc();
-
kfree(output.pointer);
return 0;
@@ -1006,7 +1015,8 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 val_u32;
acpi_status status;
@@ -1062,7 +1072,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- *val = MASK_VAL(reg, *val);
+ *val = MASK_VAL_READ(reg, *val);
return 0;
}
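For illustration, a minimal userspace sketch of the bit-field extraction that MASK_VAL_READ() performs on a SystemMemory register, assuming the GENMASK() semantics of <linux/bits.h> (the names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's GENMASK(): set bits h..l inclusive. */
    #define GENMASK64(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    struct reg_sketch { unsigned int bit_width, bit_offset; };

    static uint64_t mask_val_read(const struct reg_sketch *reg, uint64_t raw)
    {
        return (raw >> reg->bit_offset) & GENMASK64(reg->bit_width - 1, 0);
    }

    int main(void)
    {
        struct reg_sketch reg = { .bit_width = 8, .bit_offset = 8 };

        /* Extracts bits [15:8] of the raw word: 0xBEEF -> 0xBE. */
        printf("0x%llx\n", (unsigned long long)mask_val_read(&reg, 0xBEEF));
        return 0;
    }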
@@ -1071,13 +1081,17 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
int size;
+ u64 prev_val;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ struct cpc_desc *cpc_desc;
+ unsigned long flags;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
@@ -1106,8 +1120,33 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, size);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- val = MASK_VAL(reg, val);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
+ switch (size) {
+ case 8:
+ prev_val = readb_relaxed(vaddr);
+ break;
+ case 16:
+ prev_val = readw_relaxed(vaddr);
+ break;
+ case 32:
+ prev_val = readl_relaxed(vaddr);
+ break;
+ case 64:
+ prev_val = readq_relaxed(vaddr);
+ break;
+ default:
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
+ return -EFAULT;
+ }
+ val = MASK_VAL_WRITE(reg, prev_val, val);
+ }
switch (size) {
case 8:
@@ -1134,46 +1173,112 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
break;
}
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
+
return ret_val;
}
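The new rmw_lock exists because a CPC register narrower than its backing MMIO word shares that word with neighbouring fields: a safe write must read the previous word, merge the new field in, and write the whole word back as one critical section. A userspace sketch of the merge step that MASK_VAL_WRITE() performs, reusing the reg_sketch and GENMASK64() definitions above:

    static uint64_t mask_val_write(const struct reg_sketch *reg,
                                   uint64_t prev, uint64_t val)
    {
        uint64_t field = GENMASK64(reg->bit_width - 1, 0);

        /* Place the new field, keep every other bit of the previous word. */
        return ((val & field) << reg->bit_offset) |
               (prev & ~(field << reg->bit_offset));
    }

    /* With bit_width = 8, bit_offset = 8: prev 0xABCD, val 0x12 -> 0x12CD. */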
-static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
+static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ ret = cpc_read(cpu, reg, val);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *reg;
+ if (val == NULL)
+ return -EINVAL;
+
if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
-
- if (pcc_ss_id < 0)
- return -EIO;
+ if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) &&
+ !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER &&
+ IS_NULL_REG(&reg->cpc_entry.reg))) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
- pcc_ss_data = pcc_data[pcc_ss_id];
+ if (CPC_IN_PCC(reg))
+ return cppc_get_reg_val_in_pcc(cpu, reg, val);
- down_write(&pcc_ss_data->pcc_lock);
+ return cpc_read(cpu, reg, val);
+}
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, reg, perf);
- else
- ret = -EIO;
+static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
- up_write(&pcc_ss_data->pcc_lock);
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+ ret = cpc_write(cpu, reg, val);
+ if (ret)
return ret;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ reg = &cpc_desc->cpc_regs[reg_idx];
+
+ /* if a register is writeable, it must be a buffer and not null */
+ if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(&reg->cpc_entry.reg)) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
}
- cpc_read(cpunum, reg, perf);
+ if (CPC_IN_PCC(reg))
+ return cppc_set_reg_val_in_pcc(cpu, reg, val);
- return 0;
+ return cpc_write(cpu, reg, val);
}
/**
@@ -1185,7 +1290,7 @@ static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
*/
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+ return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
@@ -1198,7 +1303,7 @@ EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
*/
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+ return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf);
}
/**
@@ -1210,7 +1315,7 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
*/
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+ return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
@@ -1223,7 +1328,7 @@ EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
*/
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+ return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
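All of the getters above now funnel through cppc_get_reg_val(). A hypothetical in-kernel consumer of one of the exported wrappers might look like the sketch below (module boilerplate assumed; CPU 0 chosen arbitrarily):

    #include <linux/module.h>
    #include <acpi/cppc_acpi.h>

    static int __init cppc_demo_init(void)
    {
        u64 perf;
        int ret;

        ret = cppc_get_desired_perf(0, &perf);
        if (ret)
            return ret;

        pr_info("CPU0 desired perf: %llu\n", perf);
        return 0;
    }

    static void __exit cppc_demo_exit(void) { }

    module_init(cppc_demo_init);
    module_exit(cppc_demo_exit);
    MODULE_LICENSE("GPL");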
@@ -1330,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
@@ -1484,9 +1589,12 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
+ } else if (osc_cpc_flexible_adr_space_confirmed &&
+ CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
} else {
ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
+ pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
}
return ret;
@@ -1494,53 +1602,110 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
/**
- * cppc_get_auto_sel_caps - Read autonomous selection register.
- * @cpunum : CPU from which to read register.
- * @perf_caps : struct where autonomous selection register value is updated.
+ * cppc_set_epp() - Write the EPP register.
+ * @cpu: CPU on which to write register.
+ * @epp_val: Value to write to the EPP register.
*/
-int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+int cppc_set_epp(int cpu, u64 epp_val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *auto_sel_reg;
- u64 auto_sel;
+ if (epp_val > CPPC_ENERGY_PERF_MAX)
+ return -EINVAL;
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
- return -ENODEV;
- }
+ return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp);
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+/**
+ * cppc_get_auto_act_window() - Read autonomous activity window register.
+ * @cpu: CPU from which to read register.
+ * @auto_act_window: Return address for the window value, in microseconds.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous
+ * activity window register consists of two parts: a 7-bit value indicating
+ * the significand and a 3-bit value indicating the exponent.
+ */
+int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ unsigned int exp;
+ u64 val, sig;
+ int ret;
- if (!CPC_SUPPORTED(auto_sel_reg))
- pr_warn_once("Autonomous mode is not unsupported!\n");
+ if (auto_act_window == NULL)
+ return -EINVAL;
- if (CPC_IN_PCC(auto_sel_reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
+ if (ret)
+ return ret;
- if (pcc_ss_id < 0)
- return -ENODEV;
+ sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+ exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP;
+ *auto_act_window = sig * int_pow(10, exp);
- pcc_ss_data = pcc_data[pcc_ss_id];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_act_window);
- down_write(&pcc_ss_data->pcc_lock);
+/**
+ * cppc_set_auto_act_window() - Write autonomous activity window register.
+ * @cpu: CPU on which to write register.
+ * @auto_act_window: usec value to write to the autonomous activity window register.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value to write to the autonomous
+ * activity window register consists of two parts: a 7-bit value indicating
+ * the significand and a 3-bit value indicating the exponent.
+ */
+int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ /* The max value to store is 1270000000 */
+ u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
+ int exp = 0;
+ u64 val;
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
- cpc_read(cpunum, auto_sel_reg, &auto_sel);
- perf_caps->auto_sel = (bool)auto_sel;
- } else {
- ret = -EIO;
- }
+ if (auto_act_window > max_val)
+ return -EINVAL;
- up_write(&pcc_ss_data->pcc_lock);
+ /*
+ * The max significand is 127; when auto_act_window is larger than
+ * 129, discard the precision of the last digit and increase the
+ * exponent by 1.
+ */
+ while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
+ auto_act_window /= 10;
+ exp += 1;
+ }
+
+ /* For 128 and 129, cut it to 127. */
+ if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
+ auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+
+ val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window;
+
+ return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_act_window);
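The activity window register packs microseconds into a 7-bit significand and a 3-bit decimal exponent, so 1270000000 (127 * 10^7) is the largest representable value. A userspace sketch of the encode/decode pair, mirroring the carry-threshold and clamping logic above (constant values assumed from the code's comments):

    #include <stdint.h>

    #define SIG_BITS 7
    #define MAX_SIG  127  /* 7-bit significand */
    #define MAX_EXP  7    /* 3-bit exponent */

    static uint64_t act_window_decode(uint64_t reg)
    {
        uint64_t us = reg & MAX_SIG;
        unsigned int exp = (reg >> SIG_BITS) & MAX_EXP;

        while (exp--)
            us *= 10;
        return us;
    }

    static uint64_t act_window_encode(uint64_t usec)
    {
        unsigned int exp = 0;

        while (usec > 129) {  /* carry threshold */
            usec /= 10;
            exp++;
        }
        if (usec > MAX_SIG)   /* 128 and 129 clamp to 127 */
            usec = MAX_SIG;
        return ((uint64_t)exp << SIG_BITS) | usec;
    }

    /* Example: encode(1500) -> sig 15, exp 2; decode() gives 1500 back. */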
+/**
+ * cppc_get_auto_sel() - Read autonomous selection register.
+ * @cpu: CPU from which to read register.
+ * @enable: Return address.
+ */
+int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ u64 auto_sel;
+ int ret;
+
+ if (enable == NULL)
+ return -EINVAL;
+
+ ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
+ if (ret)
return ret;
- }
+
+ *enable = (bool)auto_sel;
return 0;
}
-EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel);
/**
* cppc_set_auto_sel - Write autonomous selection register.
@@ -1549,43 +1714,7 @@ EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
*/
int cppc_set_auto_sel(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *auto_sel_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -ENODEV;
- }
-
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
-
- if (CPC_IN_PCC(auto_sel_reg)) {
- if (pcc_ss_id < 0) {
- pr_debug("Invalid pcc_ss_id\n");
- return -ENODEV;
- }
-
- if (CPC_SUPPORTED(auto_sel_reg)) {
- ret = cpc_write(cpu, auto_sel_reg, enable);
- if (ret)
- return ret;
- }
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platform */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- } else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
- }
-
- return ret;
+ return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
@@ -1599,38 +1728,7 @@ EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
*/
int cppc_set_enable(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *enable_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -EINVAL;
- }
-
- enable_reg = &cpc_desc->cpc_regs[ENABLE];
-
- if (CPC_IN_PCC(enable_reg)) {
-
- if (pcc_ss_id < 0)
- return -EIO;
-
- ret = cpc_write(cpu, enable_reg, enable);
- if (ret)
- return ret;
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platfrom */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- return ret;
- }
-
- return cpc_write(cpu, enable_reg, enable);
+ return cppc_set_reg_val(cpu, ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
@@ -1778,7 +1876,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
-unsigned int cppc_get_transition_latency(int cpu_num)
+int cppc_get_transition_latency(int cpu_num)
{
/*
* Expected transition latency is based on the PCCT timing values
@@ -1791,31 +1889,29 @@ unsigned int cppc_get_transition_latency(int cpu_num)
* completion of a command before issuing the next command,
* in microseconds.
*/
- unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
struct cppc_pcc_data *pcc_ss_data;
+ int latency_ns = 0;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
- else if (!CPC_IN_PCC(desired_reg))
- return CPUFREQ_ETERNAL;
- if (pcc_ss_id < 0)
- return CPUFREQ_ETERNAL;
+ if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0)
+ return -ENODATA;
pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
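The returned latency is the worst case of three PCCT-derived bounds: when MPAR caps the command rate per minute, one command may have to wait 60s/MPAR, and the nominal latency and MRTT (both in microseconds) act as lower bounds as well. A small arithmetic sketch (hypothetical helper, 64-bit to sidestep the intermediate overflow a very small MPAR would cause in 32 bits):

    #include <stdint.h>

    static int64_t pcc_latency_ns(uint32_t mpar, uint32_t nominal_us, uint32_t mrtt_us)
    {
        int64_t ns = 0;

        if (mpar)  /* spread MPAR commands evenly over one minute */
            ns = 60LL * 1000 * 1000 * 1000 / mpar;
        if ((int64_t)nominal_us * 1000 > ns)
            ns = (int64_t)nominal_us * 1000;
        if ((int64_t)mrtt_us * 1000 > ns)
            ns = (int64_t)mrtt_us * 1000;
        return ns;  /* e.g. mpar=6000 -> 10 ms, which beats 500 us / 100 us */
    }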
@@ -1837,7 +1933,7 @@ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
u16 val = (u16)get_unaligned((const u16 *)
(dmi_data + DMI_PROCESSOR_MAX_SPEED));
- *mhz = val > *mhz ? val : *mhz;
+ *mhz = umax(val, *mhz);
}
}
@@ -1871,9 +1967,15 @@ unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
+ /* Avoid special case when nominal_freq is equal to lowest_freq */
+ if (caps->lowest_freq == caps->nominal_freq) {
+ mul = caps->nominal_freq;
+ div = caps->nominal_perf;
+ } else {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ div = caps->nominal_perf - caps->lowest_perf;
+ }
mul *= KHZ_PER_MHZ;
- div = caps->nominal_perf - caps->lowest_perf;
offset = caps->nominal_freq * KHZ_PER_MHZ -
div64_u64(caps->nominal_perf * mul, div);
} else {
@@ -1894,11 +1996,17 @@ unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
s64 retval, offset = 0;
static u64 max_khz;
- u64 mul, div;
+ u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
+ /* Avoid special case when nominal_freq is equal to lowest_freq */
+ if (caps->lowest_freq == caps->nominal_freq) {
+ mul = caps->nominal_perf;
+ div = caps->nominal_freq;
+ } else {
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ }
/*
* We don't need to convert to kHz for computing offset and can
* directly use nominal_freq and lowest_freq as the div64_u64
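Both conversions implement the same linear map between the (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq) anchor points, with the new special case keeping the divisor non-zero when firmware reports identical lowest and nominal frequencies. A userspace sketch of perf-to-kHz with a worked example (field layout assumed from the code above):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t perf_to_khz(uint64_t lo_mhz, uint64_t nom_mhz,
                                uint64_t lo_perf, uint64_t nom_perf, uint64_t perf)
    {
        uint64_t mul, div;
        int64_t offset;

        if (lo_mhz == nom_mhz) {  /* degenerate case: line through the origin */
            mul = nom_mhz;
            div = nom_perf;
        } else {
            mul = nom_mhz - lo_mhz;
            div = nom_perf - lo_perf;
        }
        mul *= 1000;  /* MHz -> kHz */
        offset = nom_mhz * 1000 - nom_perf * mul / div;
        return offset + perf * mul / div;
    }

    int main(void)
    {
        /* 800 MHz at perf 10, 2000 MHz at perf 25: perf 20 -> 1600000 kHz. */
        printf("%llu\n", (unsigned long long)perf_to_khz(800, 2000, 10, 25, 20));
        return 0;
    }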
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3b4d048c4941..4e0583274b8f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1119,6 +1119,8 @@ int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ dev_pm_set_strict_midlayer(dev, true);
+
if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
int ret = dev->driver->pm->prepare(dev);
@@ -1147,6 +1149,8 @@ void acpi_subsys_complete(struct device *dev)
*/
if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
+
+ dev_pm_set_strict_midlayer(dev, false);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1161,7 +1165,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete);
*/
int acpi_subsys_suspend(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
@@ -1320,7 +1324,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
*/
int acpi_subsys_poweroff(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
@@ -1362,6 +1366,8 @@ static int acpi_subsys_poweroff_noirq(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
+
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
@@ -1382,6 +1388,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.restore_early = acpi_subsys_restore_early,
#endif
},
+ .detach = acpi_dev_pm_detach,
};
/**
@@ -1465,7 +1472,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_device_wakeup_disable(adev);
}
- dev->pm_domain->detach = acpi_dev_pm_detach;
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 23373faa35ec..cd199fbe4dc9 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -439,23 +439,33 @@ static ssize_t description_show(struct device *dev,
char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *str_obj;
+ acpi_status status;
int result;
- if (acpi_dev->pnp.str_obj == NULL)
- return 0;
+ status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR",
+ NULL, &buffer,
+ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ str_obj = buffer.pointer;
/*
* The _STR object contains a Unicode identifier for a device.
* We need to convert to utf-8 so it can be displayed.
*/
result = utf16s_to_utf8s(
- (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
- acpi_dev->pnp.str_obj->buffer.length,
+ (wchar_t *)str_obj->buffer.pointer,
+ str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE - 1);
buf[result++] = '\n';
+ ACPI_FREE(str_obj);
+
return result;
}
static DEVICE_ATTR_RO(description);
@@ -507,96 +517,97 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(status);
-/**
- * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
- * @dev: ACPI device object.
- */
-int acpi_device_setup_files(struct acpi_device *dev)
-{
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- int result = 0;
+static struct attribute *acpi_attrs[] = {
+ &dev_attr_path.attr,
+ &dev_attr_hid.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_description.attr,
+ &dev_attr_adr.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_sun.attr,
+ &dev_attr_hrv.attr,
+ &dev_attr_status.attr,
+ &dev_attr_eject.attr,
+ &dev_attr_power_state.attr,
+ &dev_attr_real_power_state.attr,
+ NULL
+};
+static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr)
+{
/*
* Devices gotten from FADT don't have a "path" attribute
*/
- if (dev->handle) {
- result = device_create_file(&dev->dev, &dev_attr_path);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_path)
+ return dev->handle;
- if (!list_empty(&dev->pnp.ids)) {
- result = device_create_file(&dev->dev, &dev_attr_hid);
- if (result)
- goto end;
+ if (attr == &dev_attr_hid || attr == &dev_attr_modalias)
+ return !list_empty(&dev->pnp.ids);
- result = device_create_file(&dev->dev, &dev_attr_modalias);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_description)
+ return acpi_has_method(dev->handle, "_STR");
- /*
- * If device has _STR, 'description' file is created
- */
- if (acpi_has_method(dev->handle, "_STR")) {
- status = acpi_evaluate_object(dev->handle, "_STR",
- NULL, &buffer);
- if (ACPI_FAILURE(status))
- buffer.pointer = NULL;
- dev->pnp.str_obj = buffer.pointer;
- result = device_create_file(&dev->dev, &dev_attr_description);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_adr)
+ return dev->pnp.type.bus_address;
- if (dev->pnp.type.bus_address)
- result = device_create_file(&dev->dev, &dev_attr_adr);
- if (acpi_device_uid(dev))
- result = device_create_file(&dev->dev, &dev_attr_uid);
+ if (attr == &dev_attr_uid)
+ return acpi_device_uid(dev);
- if (acpi_has_method(dev->handle, "_SUN")) {
- result = device_create_file(&dev->dev, &dev_attr_sun);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_sun)
+ return acpi_has_method(dev->handle, "_SUN");
- if (acpi_has_method(dev->handle, "_HRV")) {
- result = device_create_file(&dev->dev, &dev_attr_hrv);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_hrv)
+ return acpi_has_method(dev->handle, "_HRV");
- if (acpi_has_method(dev->handle, "_STA")) {
- result = device_create_file(&dev->dev, &dev_attr_status);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_status)
+ return acpi_has_method(dev->handle, "_STA");
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
*/
- if (acpi_has_method(dev->handle, "_EJ0")) {
- result = device_create_file(&dev->dev, &dev_attr_eject);
- if (result)
- return result;
- }
+ if (attr == &dev_attr_eject)
+ return acpi_has_method(dev->handle, "_EJ0");
- if (dev->flags.power_manageable) {
- result = device_create_file(&dev->dev, &dev_attr_power_state);
- if (result)
- return result;
+ if (attr == &dev_attr_power_state)
+ return dev->flags.power_manageable;
- if (dev->power.flags.power_resources)
- result = device_create_file(&dev->dev,
- &dev_attr_real_power_state);
- }
+ if (attr == &dev_attr_real_power_state)
+ return dev->flags.power_manageable && dev->power.flags.power_resources;
- acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data);
+ dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name);
+ return false;
+}
-end:
- return result;
+static umode_t acpi_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int attrno)
+{
+ struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj));
+
+ if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr)))
+ return attr->mode;
+ else
+ return 0;
+}
+
+static const struct attribute_group acpi_group = {
+ .attrs = acpi_attrs,
+ .is_visible = acpi_attr_is_visible,
+};
+
+const struct attribute_group *acpi_groups[] = {
+ &acpi_group,
+ NULL
+};
+
+/**
+ * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+void acpi_device_setup_files(struct acpi_device *dev)
+{
+ acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data);
}
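The rewrite above replaces the imperative device_create_file() sequence with the standard sysfs visible-groups idiom: declare every attribute statically and let an .is_visible() callback decide per device. A minimal sketch of that pattern for a hypothetical driver (the visibility condition is arbitrary, for illustration):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "demo\n");
    }
    static DEVICE_ATTR_RO(demo);

    static struct attribute *demo_attrs[] = {
        &dev_attr_demo.attr,
        NULL
    };

    static umode_t demo_attr_is_visible(struct kobject *kobj,
                                        struct attribute *attr, int n)
    {
        struct device *dev = kobj_to_dev(kobj);

        /* Return 0 to hide the file, or the attribute's mode to expose it. */
        return dev_get_drvdata(dev) ? attr->mode : 0;
    }

    static const struct attribute_group demo_group = {
        .attrs = demo_attrs,
        .is_visible = demo_attr_is_visible,
    };

    /* Passed as dev->groups (or bus/class dev_groups) at registration time. */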
/**
@@ -606,41 +617,4 @@ end:
void acpi_device_remove_files(struct acpi_device *dev)
{
acpi_hide_nondev_subnodes(&dev->data);
-
- if (dev->flags.power_manageable) {
- device_remove_file(&dev->dev, &dev_attr_power_state);
- if (dev->power.flags.power_resources)
- device_remove_file(&dev->dev,
- &dev_attr_real_power_state);
- }
-
- /*
- * If device has _STR, remove 'description' file
- */
- if (acpi_has_method(dev->handle, "_STR")) {
- kfree(dev->pnp.str_obj);
- device_remove_file(&dev->dev, &dev_attr_description);
- }
- /*
- * If device has _EJ0, remove 'eject' file.
- */
- if (acpi_has_method(dev->handle, "_EJ0"))
- device_remove_file(&dev->dev, &dev_attr_eject);
-
- if (acpi_has_method(dev->handle, "_SUN"))
- device_remove_file(&dev->dev, &dev_attr_sun);
-
- if (acpi_has_method(dev->handle, "_HRV"))
- device_remove_file(&dev->dev, &dev_attr_hrv);
-
- if (acpi_device_uid(dev))
- device_remove_file(&dev->dev, &dev_attr_uid);
- if (dev->pnp.type.bus_address)
- device_remove_file(&dev->dev, &dev_attr_adr);
- device_remove_file(&dev->dev, &dev_attr_modalias);
- device_remove_file(&dev->dev, &dev_attr_hid);
- if (acpi_has_method(dev->handle, "_STA"))
- device_remove_file(&dev->dev, &dev_attr_status);
- if (dev->handle)
- device_remove_file(&dev->dev, &dev_attr_path);
}
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
index 297340682f66..e912a3be1d28 100644
--- a/drivers/acpi/dptf/Makefile
+++ b/drivers/acpi/dptf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ACPI) += int340x_thermal.o
obj-$(CONFIG_DPTF_POWER) += dptf_power.o
obj-$(CONFIG_DPTF_PCH_FIVR) += dptf_pch_fivr.o
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index d202730fafd8..8d7e555929d3 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -41,7 +41,7 @@ static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp
ret = 0;
release_buffer:
- kfree(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
return ret;
}
@@ -152,13 +152,14 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1064", 0},
{"INTC106B", 0},
{"INTC10A3", 0},
+ {"INTC10D7", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
static struct platform_driver pch_fivr_driver = {
.probe = pch_fivr_add,
- .remove_new = pch_fivr_remove,
+ .remove = pch_fivr_remove,
.driver = {
.name = "dptf_pch_fivr",
.acpi_match_table = pch_fivr_device_ids,
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 8023b3e23315..55ccbb8ddbe3 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -236,13 +236,19 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
+ {"INTC10D8", 0},
+ {"INTC10D9", 0},
+ {"INTC1100", 0},
+ {"INTC1101", 0},
+ {"INTC10F7", 0},
+ {"INTC10F8", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
static struct platform_driver dptf_power_driver = {
.probe = dptf_power_add,
- .remove_new = dptf_power_remove,
+ .remove = dptf_power_remove,
.driver = {
.name = "dptf_power",
.acpi_match_table = int3407_device_ids,
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
deleted file mode 100644
index 014ada759954..000000000000
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ACPI support for int340x thermal drivers
- *
- * Copyright (C) 2014, Intel Corporation
- * Authors: Zhang Rui <rui.zhang@intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/module.h>
-
-#include "../internal.h"
-
-#define INT3401_DEVICE 0X01
-static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400"},
- {"INT3401", INT3401_DEVICE},
- {"INT3402"},
- {"INT3403"},
- {"INT3404"},
- {"INT3406"},
- {"INT3407"},
- {"INT3408"},
- {"INT3409"},
- {"INT340A"},
- {"INT340B"},
- {"INT3532"},
- {"INTC1040"},
- {"INTC1041"},
- {"INTC1042"},
- {"INTC1043"},
- {"INTC1044"},
- {"INTC1045"},
- {"INTC1046"},
- {"INTC1047"},
- {"INTC1048"},
- {"INTC1049"},
- {"INTC1050"},
- {"INTC1060"},
- {"INTC1061"},
- {"INTC1062"},
- {"INTC1063"},
- {"INTC1064"},
- {"INTC1065"},
- {"INTC1066"},
- {"INTC1068"},
- {"INTC1069"},
- {"INTC106A"},
- {"INTC106B"},
- {"INTC106C"},
- {"INTC106D"},
- {"INTC10A0"},
- {"INTC10A1"},
- {"INTC10A2"},
- {"INTC10A3"},
- {"INTC10A4"},
- {"INTC10A5"},
- {""},
-};
-
-static int int340x_thermal_handler_attach(struct acpi_device *adev,
- const struct acpi_device_id *id)
-{
- if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev, NULL);
- /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
- else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
- id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev, NULL);
- return 1;
-}
-
-static struct acpi_scan_handler int340x_thermal_handler = {
- .ids = int340x_thermal_device_ids,
- .attach = int340x_thermal_handler_attach,
-};
-
-void __init acpi_int340x_thermal_init(void)
-{
- acpi_scan_add_handler(&int340x_thermal_handler);
-}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 02255795b800..59b3d50ff01e 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -23,8 +23,10 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -783,6 +785,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
unsigned long tmp;
int ret = 0;
+ if (t->rdata)
+ memset(t->rdata, 0, t->rlen);
+
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
@@ -819,8 +824,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
return -EINVAL;
- if (t->rdata)
- memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->mutex);
if (ec->global_lock) {
@@ -847,7 +850,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
.wdata = NULL, .rdata = &d,
.wlen = 0, .rlen = 1};
- return acpi_ec_transaction(ec, &t);
+ return acpi_ec_transaction_unlocked(ec, &t);
}
static int acpi_ec_burst_disable(struct acpi_ec *ec)
@@ -857,7 +860,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
.wlen = 0, .rlen = 0};
return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
- acpi_ec_transaction(ec, &t) : 0;
+ acpi_ec_transaction_unlocked(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
@@ -873,6 +876,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
return result;
}
+static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
+{
+ int result;
+ u8 d;
+ struct transaction t = {.command = ACPI_EC_COMMAND_READ,
+ .wdata = &address, .rdata = &d,
+ .wlen = 1, .rlen = 1};
+
+ result = acpi_ec_transaction_unlocked(ec, &t);
+ *data = d;
+ return result;
+}
+
static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
@@ -883,6 +899,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
return acpi_ec_transaction(ec, &t);
}
+static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
+{
+ u8 wdata[2] = { address, data };
+ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
+ .wdata = wdata, .rdata = NULL,
+ .wlen = 2, .rlen = 0};
+
+ return acpi_ec_transaction_unlocked(ec, &t);
+}
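The _unlocked read/write variants exist because acpi_ec_space_handler() now takes ec->mutex once around the whole multi-byte region access; calling the self-locking acpi_ec_transaction() from inside that section would deadlock. The usual shape of such a locked/unlocked split, sketched with hypothetical names:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct demo_ctx { struct mutex lock; };

    static int demo_op_unlocked(struct demo_ctx *c)
    {
        lockdep_assert_held(&c->lock);  /* caller owns the lock */
        /* ... one hardware transaction ... */
        return 0;
    }

    static int demo_op(struct demo_ctx *c)
    {
        int ret;

        mutex_lock(&c->lock);
        ret = demo_op_unlocked(c);      /* single-shot callers lock here */
        mutex_unlock(&c->lock);
        return ret;
    }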
+
int ec_read(u8 addr, u8 *val)
{
int err;
@@ -1323,6 +1349,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
struct acpi_ec *ec = handler_context;
int result = 0, i, bytes = bits / 8;
u8 *value = (u8 *)value64;
+ u32 glk;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
@@ -1330,17 +1357,38 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
+ mutex_lock(&ec->mutex);
+
+ if (ec->global_lock) {
+ acpi_status status;
+
+ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto unlock;
+ }
+ }
+
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
- for (i = 0; i < bytes; ++i, ++address, ++value)
+ for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
- acpi_ec_read(ec, address, value) :
- acpi_ec_write(ec, address, *value);
+ acpi_ec_read_unlocked(ec, address, value) :
+ acpi_ec_write_unlocked(ec, address, *value);
+ if (result < 0)
+ break;
+ }
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
+ if (ec->global_lock)
+ acpi_release_global_lock(glk);
+
+unlock:
+ mutex_unlock(&ec->mutex);
+
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
@@ -1348,8 +1396,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
- default:
+ case 0:
return AE_OK;
+ default:
+ return AE_ERROR;
}
}
@@ -1487,8 +1537,10 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
acpi_ec_enter_noirq(ec);
- status = acpi_install_address_space_handler_no_reg(ec->handle,
+ status = acpi_install_address_space_handler_no_reg(scope_handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
NULL, ec);
@@ -1497,11 +1549,10 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
return -ENODEV;
}
set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
- ec->address_space_handler_holder = ec->handle;
}
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
- acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
+ acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
@@ -1553,10 +1604,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
static void ec_remove_handlers(struct acpi_ec *ec)
{
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(
- ec->address_space_handler_holder,
- ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ scope_handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler)))
pr_err("failed to remove space handler\n");
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
@@ -1595,14 +1649,18 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca
{
int ret;
- ret = ec_install_handlers(ec, device, call_reg);
- if (ret)
- return ret;
-
/* First EC capable of handling transactions */
if (!first_ec)
first_ec = ec;
+ ret = ec_install_handlers(ec, device, call_reg);
+ if (ret) {
+ if (ec == first_ec)
+ first_ec = NULL;
+
+ return ret;
+ }
+
pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
ec->data_addr);
@@ -1621,8 +1679,8 @@ static int acpi_ec_add(struct acpi_device *device)
struct acpi_ec *ec;
int ret;
- strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_EC_CLASS);
if (boot_ec && (boot_ec->handle == device->handle ||
!strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
@@ -1709,6 +1767,12 @@ static void acpi_ec_remove(struct acpi_device *device)
}
}
+void acpi_ec_register_opregions(struct acpi_device *adev)
+{
+ if (first_ec && first_ec->handle != adev->handle)
+ acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC);
+}
+
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
@@ -1969,6 +2033,25 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
+ if (!strlen(ecdt_ptr->id)) {
+ /*
+ * The ECDT table on some MSI notebooks contains invalid data, together
+ * with an empty ID string ("").
+ *
+ * Section 5.2.15 of the ACPI specification requires the ID string to be
+ * a "fully qualified reference to the (...) embedded controller device",
+ * so this string always has to start with a backslash.
+ *
+ * However, some ThinkBook machines have an ECDT table with a valid EC
+ * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+ *
+ * Because of this we only check if the ID string is empty in order to
+ * avoid the obvious cases.
+ */
+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
+ goto out;
+ }
+
ec = acpi_ec_alloc();
if (!ec)
goto out;
@@ -2211,7 +2294,8 @@ static int acpi_ec_init_workqueues(void)
ec_wq = alloc_ordered_workqueue("kec", 0);
if (!ec_query_wq)
- ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
+ ec_query_wq = alloc_workqueue("kec_query", WQ_PERCPU,
+ ec_max_queries);
if (!ec_wq || !ec_query_wq) {
acpi_ec_destroy_workqueues();
@@ -2239,6 +2323,40 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
+ {
+ // TUXEDO InfinityBook Pro AMD Gen9
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index d199a19bb292..96a9aaaaf9f7 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -28,8 +28,8 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
{
struct acpi_bus_event event;
- strcpy(event.device_class, dev->pnp.device_class);
- strcpy(event.bus_id, dev->pnp.bus_id);
+ strscpy(event.device_class, dev->pnp.device_class);
+ strscpy(event.bus_id, dev->pnp.bus_id);
event.type = type;
event.data = data;
return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 11778c93254b..5c35cbc7f6ff 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -185,7 +185,7 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
- .remove_new = ged_remove,
+ .remove = ged_remove,
.shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index f89d19c922dc..97ce3212edf3 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -10,6 +10,9 @@
#ifndef _ACPI_FAN_H_
#define _ACPI_FAN_H_
+#include <linux/kconfig.h>
+#include <linux/limits.h>
+
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
@@ -17,6 +20,9 @@
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
+ {"INTC10D6", }, /* Fan for Panther Lake generation */ \
+ {"INTC10FE", }, /* Fan for Wildcat Lake generation */ \
+ {"INTC10F5", }, /* Fan for Nova Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
@@ -45,16 +51,64 @@ struct acpi_fan_fst {
};
struct acpi_fan {
+ acpi_handle handle;
bool acpi4;
+ bool has_fst;
struct acpi_fan_fif fif;
struct acpi_fan_fps *fps;
int fps_count;
+ /* A value of 0 means that trip-point-related functions are not supported */
+ u32 fan_trip_granularity;
+#if IS_REACHABLE(CONFIG_HWMON)
+ struct device *hdev;
+#endif
struct thermal_cooling_device *cdev;
struct device_attribute fst_speed;
struct device_attribute fine_grain_control;
};
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+/**
+ * acpi_fan_speed_valid - Check if fan speed value is valid
+ * @speed: Speed value returned by the ACPI firmware
+ *
+ * Check if the fan speed value returned by the ACPI firmware is valid. This function is
+ * necessary as ACPI firmware implementations can return 0xFFFFFFFF to signal that the
+ * ACPI fan does not support speed reporting. Additionally, some buggy ACPI firmware
+ * implementations return a value larger than the 32-bit integer value defined by
+ * the ACPI specification when using placeholder values. Such invalid values are also
+ * detected by this function.
+ *
+ * Returns: True if the fan speed value is valid, false otherwise.
+ */
+static inline bool acpi_fan_speed_valid(u64 speed)
+{
+ return speed < U32_MAX;
+}
+
+/**
+ * acpi_fan_power_valid - Check if fan power value is valid
+ * @power: Power value returned by the ACPI firmware
+ *
+ * Check if the fan power value returned by the ACPI firmware is valid.
+ * See acpi_fan_speed_valid() for details.
+ *
+ * Returns: True if the fan power value is valid, false otherwise.
+ */
+static inline bool acpi_fan_power_valid(u64 power)
+{
+ return power < U32_MAX;
+}
+
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
+
+#if IS_REACHABLE(CONFIG_HWMON)
+int devm_acpi_fan_create_hwmon(struct device *dev);
+void acpi_fan_notify_hwmon(struct device *dev);
+#else
+static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; }
+static inline void acpi_fan_notify_hwmon(struct device *dev) { }
+#endif
+
#endif
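A short userspace check of the validity helpers above: the strict `< U32_MAX` comparison rejects both the 0xFFFFFFFF "not supported" placeholder and any wider garbage a buggy firmware might return:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool fan_speed_valid(uint64_t speed)
    {
        return speed < UINT32_MAX;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               fan_speed_valid(3000),            /* 1: real RPM value   */
               fan_speed_valid(0xFFFFFFFFULL),   /* 0: placeholder      */
               fan_speed_valid(0x1FFFFFFFFULL)); /* 0: firmware garbage */
        return 0;
    }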
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index f4f6e2381f1d..9b7fa52f3c2a 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -22,9 +22,9 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha
int count;
if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = scnprintf(buf, PAGE_SIZE, "not-defined:");
+ count = sysfs_emit(buf, "not-defined:");
else
- count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+ count = sysfs_emit(buf, "%lld:", fps->control);
if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
count += sysfs_emit_at(buf, count, "not-defined:");
@@ -55,11 +55,11 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
- status = acpi_fan_get_fst(acpi_dev, &fst);
+ status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;
- return sprintf(buf, "%lld\n", fst.speed);
+ return sysfs_emit(buf, "%lld\n", fst.speed);
}
static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
@@ -67,7 +67,7 @@ static ssize_t show_fine_grain_control(struct device *dev, struct device_attribu
struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
struct acpi_fan *fan = acpi_driver_data(acpi_dev);
- return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
+ return sysfs_emit(buf, "%d\n", fan->fif.fine_grain_ctrl);
}
int acpi_fan_create_attributes(struct acpi_device *device)
@@ -75,15 +75,6 @@ int acpi_fan_create_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i, status;
- sysfs_attr_init(&fan->fine_grain_control.attr);
- fan->fine_grain_control.show = show_fine_grain_control;
- fan->fine_grain_control.store = NULL;
- fan->fine_grain_control.attr.name = "fine_grain_control";
- fan->fine_grain_control.attr.mode = 0444;
- status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
- if (status)
- return status;
-
/* _FST is present if we are here */
sysfs_attr_init(&fan->fst_speed.attr);
fan->fst_speed.show = show_fan_speed;
@@ -92,7 +83,19 @@ int acpi_fan_create_attributes(struct acpi_device *device)
fan->fst_speed.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
if (status)
- goto rem_fine_grain_attr;
+ return status;
+
+ if (!fan->acpi4)
+ return 0;
+
+ sysfs_attr_init(&fan->fine_grain_control.attr);
+ fan->fine_grain_control.show = show_fine_grain_control;
+ fan->fine_grain_control.store = NULL;
+ fan->fine_grain_control.attr.name = "fine_grain_control";
+ fan->fine_grain_control.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+ if (status)
+ goto rem_fst_attr;
for (i = 0; i < fan->fps_count; ++i) {
struct acpi_fan_fps *fps = &fan->fps[i];
@@ -109,18 +112,18 @@ int acpi_fan_create_attributes(struct acpi_device *device)
for (j = 0; j < i; ++j)
sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
- goto rem_fst_attr;
+ goto rem_fine_grain_attr;
}
}
return 0;
-rem_fst_attr:
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
-
rem_fine_grain_attr:
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+rem_fst_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
return status;
}
@@ -129,9 +132,13 @@ void acpi_fan_delete_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i;
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
+ if (!fan->acpi4)
+ return;
+
for (i = 0; i < fan->fps_count; ++i)
sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
}
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index ff72e4ef8738..fb08b8549ed7 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -7,11 +7,16 @@
* Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/uuid.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -19,6 +24,26 @@
#include "fan.h"
+#define ACPI_FAN_NOTIFY_STATE_CHANGED 0x80
+
+/*
+ * Defined inside the "Fan Noise Signal" section at
+ * https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide.
+ */
+static const guid_t acpi_fan_microsoft_guid = GUID_INIT(0xA7611840, 0x99FE, 0x41AE, 0xA4, 0x88,
+ 0x35, 0xC7, 0x59, 0x26, 0xC8, 0xEB);
+#define ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY 1
+#define ACPI_FAN_DSM_SET_TRIP_POINTS 2
+#define ACPI_FAN_DSM_GET_OPERATING_RANGES 3
+
+/*
+ * Ensures that fans with a very low trip point granularity
+ * do not send too many notifications.
+ */
+static uint min_trip_distance = 100;
+module_param(min_trip_distance, uint, 0);
+MODULE_PARM_DESC(min_trip_distance, "Minimum distance between fan speed trip points in RPM");
+
static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS,
{"", 0},
@@ -44,25 +69,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_err(&device->dev, "Get fan state failed\n");
- return -ENODEV;
- }
+ status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -EIO;
obj = buffer.pointer;
- if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
- obj->package.count != 3 ||
- obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
- dev_err(&device->dev, "Invalid _FST data\n");
- ret = -EINVAL;
+ if (!obj)
+ return -ENODATA;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EPROTO;
goto err;
}
@@ -81,7 +111,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
- status = acpi_fan_get_fst(device, &fst);
+ status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -102,7 +132,7 @@ match_fps:
break;
}
if (i == fan->fps_count) {
- dev_dbg(&device->dev, "Invalid control value returned\n");
+ dev_dbg(&device->dev, "No matching fps control value\n");
return -EINVAL;
}
@@ -203,14 +233,6 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
-static bool acpi_fan_is_acpi4(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FIF") &&
- acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL") &&
- acpi_has_method(device->handle, "_FST");
-}
-
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -311,6 +333,182 @@ err:
return status;
}
+static int acpi_fan_dsm_init(struct device *dev)
+{
+ union acpi_object dummy = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = 0,
+ .elements = NULL,
+ },
+ };
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ union acpi_object *obj;
+ int ret = 0;
+
+ if (!acpi_check_dsm(fan->handle, &acpi_fan_microsoft_guid, 0,
+ BIT(ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY) |
+ BIT(ACPI_FAN_DSM_SET_TRIP_POINTS)))
+ return 0;
+
+ dev_info(dev, "Using Microsoft fan extensions\n");
+
+ obj = acpi_evaluate_dsm_typed(fan->handle, &acpi_fan_microsoft_guid, 0,
+ ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY, &dummy,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return -EIO;
+
+ if (obj->integer.value > U32_MAX)
+ ret = -EOVERFLOW;
+ else
+ fan->fan_trip_granularity = obj->integer.value;
+
+ kfree(obj);
+
+ return ret;
+}
+
+static int acpi_fan_dsm_set_trip_points(struct device *dev, u64 upper, u64 lower)
+{
+ union acpi_object args[2] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = lower,
+ },
+ },
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = upper,
+ },
+ },
+ };
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ union acpi_object in = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = ARRAY_SIZE(args),
+ .elements = args,
+ },
+ };
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm(fan->handle, &acpi_fan_microsoft_guid, 0,
+ ACPI_FAN_DSM_SET_TRIP_POINTS, &in);
+ kfree(obj);
+
+ return 0;
+}
+
+static int acpi_fan_dsm_start(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ int ret;
+
+ if (!fan->fan_trip_granularity)
+ return 0;
+
+ /*
+ * Some firmware implementations only update the values returned by the
+ * _FST control method when a notification is received. This usually
+ * works with Microsoft Windows as setting up trip points will keep
+ * triggering said notifications, but will cause issues when using _FST
+ * without the Microsoft-specific trip point extension.
+ *
+ * Because of this, an initial notification needs to be triggered to
+ * start the cycle of trip point updates. This is achieved by setting
+ * the trip points sequentially to two separate ranges. Per the
+ * Microsoft specification, the firmware should trigger a notification
+ * immediately if the fan speed is outside the trip point range. This
+ * _should_ result in at least one notification as both ranges do not
+ * overlap, meaning that the current fan speed needs to be outside at
+ * least one range.
+ */
+ ret = acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity, 0);
+ if (ret < 0)
+ return ret;
+
+ return acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity * 3,
+ fan->fan_trip_granularity * 2);
+}
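A quick check of the bootstrap argument in the comment above: with granularity g, the two windows [0, g] and [2g, 3g] are disjoint, so whatever the current fan speed is, it lies outside at least one of them and at least one notification should fire. A userspace assertion sketch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t g = 100;  /* hypothetical trip-point granularity */

        for (uint64_t s = 0; s < 10 * g; s++) {
            int in_first = s <= g;                    /* window [0, g]   */
            int in_second = s >= 2 * g && s <= 3 * g; /* window [2g, 3g] */

            assert(!(in_first && in_second));  /* never inside both */
        }
        return 0;
    }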
+
+static int acpi_fan_dsm_update_trips_points(struct device *dev, struct acpi_fan_fst *fst)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ u64 upper, lower;
+
+ if (!fan->fan_trip_granularity)
+ return 0;
+
+ if (!acpi_fan_speed_valid(fst->speed))
+ return -EINVAL;
+
+ upper = roundup_u64(fst->speed + min_trip_distance, fan->fan_trip_granularity);
+ if (fst->speed <= min_trip_distance) {
+ lower = 0;
+ } else {
+ /*
+ * Valid fan speed values cannot be wider than 32 bits, so
+ * we can safely assume that no overflow will happen here.
+ */
+ lower = rounddown((u32)fst->speed - min_trip_distance, fan->fan_trip_granularity);
+ }
+
+ return acpi_fan_dsm_set_trip_points(dev, upper, lower);
+}
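The window placed around the current speed is widened by min_trip_distance on each side and then snapped to the firmware's granularity. A userspace sketch with a worked example (the helpers mimic the kernel's roundup()/rounddown() for these operands):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t round_up_to(uint64_t x, uint64_t m)   { return (x + m - 1) / m * m; }
    static uint64_t round_down_to(uint64_t x, uint64_t m) { return x / m * m; }

    int main(void)
    {
        uint64_t g = 100, min_dist = 100, speed = 1540;

        uint64_t upper = round_up_to(speed + min_dist, g);    /* 1640 -> 1700 */
        uint64_t lower = round_down_to(speed - min_dist, g);  /* 1440 -> 1400 */

        printf("trip window: [%llu, %llu]\n",
               (unsigned long long)lower, (unsigned long long)upper);
        return 0;
    }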
+
+static void acpi_fan_notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct device *dev = context;
+ struct acpi_fan_fst fst;
+ int ret;
+
+ switch (event) {
+ case ACPI_FAN_NOTIFY_STATE_CHANGED:
+ /*
+ * The ACPI specification says that we must evaluate _FST when we
+ * receive an ACPI event indicating that the fan state has changed.
+ */
+ ret = acpi_fan_get_fst(handle, &fst);
+ if (ret < 0) {
+ dev_err(dev, "Error retrieving current fan status: %d\n", ret);
+ } else {
+ ret = acpi_fan_dsm_update_trips_points(dev, &fst);
+ if (ret < 0)
+ dev_err(dev, "Failed to update trip points: %d\n", ret);
+ }
+
+ acpi_fan_notify_hwmon(dev);
+ acpi_bus_generate_netlink_event("fan", dev_name(dev), event, 0);
+ break;
+ default:
+ dev_dbg(dev, "Unsupported ACPI notification 0x%x\n", event);
+ break;
+ }
+}
+
+static void acpi_fan_notify_remove(void *data)
+{
+ struct acpi_fan *fan = data;
+
+ acpi_remove_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY, acpi_fan_notify_handler);
+}
+
+static int devm_acpi_fan_notify_init(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ acpi_status status;
+
+ status = acpi_install_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY,
+ acpi_fan_notify_handler, dev);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return devm_add_action_or_reset(dev, acpi_fan_notify_remove, fan);
+}
+
static int acpi_fan_probe(struct platform_device *pdev)
{
int result = 0;
@@ -319,15 +517,27 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
+ if (!device)
+ return -ENODEV;
+
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
+
+ fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_is_acpi4(device)) {
+ if (acpi_has_method(device->handle, "_FST")) {
+ fan->has_fst = true;
+ fan->acpi4 = acpi_has_method(device->handle, "_FIF") &&
+ acpi_has_method(device->handle, "_FPS") &&
+ acpi_has_method(device->handle, "_FSL");
+ }
+
+ if (fan->acpi4) {
result = acpi_fan_get_fif(device);
if (result)
return result;
@@ -335,13 +545,33 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_get_fps(device);
if (result)
return result;
+ }
+
+ if (fan->has_fst) {
+ result = acpi_fan_dsm_init(&pdev->dev);
+ if (result)
+ return result;
+
+ result = devm_acpi_fan_create_hwmon(&pdev->dev);
+ if (result)
+ return result;
+
+ result = devm_acpi_fan_notify_init(&pdev->dev);
+ if (result)
+ return result;
+
+ result = acpi_fan_dsm_start(&pdev->dev);
+ if (result) {
+ dev_err(&pdev->dev, "Failed to start Microsoft fan extensions\n");
+ return result;
+ }
result = acpi_fan_create_attributes(device);
if (result)
return result;
+ }
- fan->acpi4 = true;
- } else {
+ if (!fan->acpi4) {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
@@ -367,21 +597,27 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&pdev->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
+ goto err_unregister;
+ }
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
- goto err_end;
+ goto err_remove_link;
}
return 0;
+err_remove_link:
+ sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
+err_unregister:
+ thermal_cooling_device_unregister(cdev);
err_end:
- if (fan->acpi4)
+ if (fan->has_fst)
acpi_fan_delete_attributes(device);
return result;
@@ -391,7 +627,7 @@ static void acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
- if (fan->acpi4) {
+ if (fan->has_fst) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
acpi_fan_delete_attributes(device);
@@ -415,8 +651,14 @@ static int acpi_fan_suspend(struct device *dev)
static int acpi_fan_resume(struct device *dev)
{
- int result;
struct acpi_fan *fan = dev_get_drvdata(dev);
+ int result;
+
+ if (fan->has_fst) {
+ result = acpi_fan_dsm_start(dev);
+ if (result)
+ dev_err(dev, "Failed to start Microsoft fan extensions: %d\n", result);
+ }
if (fan->acpi4)
return 0;
@@ -444,7 +686,7 @@ static const struct dev_pm_ops acpi_fan_pm = {
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
- .remove_new = acpi_fan_remove,
+ .remove = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
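
The fan driver's .remove_new -> .remove change above is part of the tree-wide conversion that made the platform-driver remove callback return void; .remove_new was the transitional name while both signatures coexisted. A minimal sketch of the resulting driver shape (my_probe, my_remove and "my-driver" are hypothetical names, not part of the patch):

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;
}

static void my_remove(struct platform_device *pdev)
{
	/* void return: remove() is no longer allowed to fail */
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.remove = my_remove,	/* formerly .remove_new during the transition */
	.driver = {
		.name = "my-driver",
	},
};
module_platform_driver(my_driver);

The same rename recurs below in pfr_telemetry.c and pfr_update.c.
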
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
new file mode 100644
index 000000000000..d3374f8f524b
--- /dev/null
+++ b/drivers/acpi/fan_hwmon.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * hwmon interface for the ACPI Fan driver.
+ *
+ * Copyright (C) 2024 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "fan.h"
+
+static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control)
+{
+ unsigned int i;
+
+ for (i = 0; i < fan->fps_count; i++) {
+ if (fan->fps[i].control == control)
+ return &fan->fps[i];
+ }
+
+ return NULL;
+}
+
+static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct acpi_fan *fan = drvdata;
+ unsigned int i;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_target:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
+ /*
+ * When in fine grain control mode, not every fan control value
+ * has an associated fan performance state.
+ */
+ if (fan->fif.fine_grain_ctrl)
+ return 0;
+
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
+ /*
+ * When in fine grain control mode, not every fan control value
+ * has an associated fan performance state.
+ */
+ if (fan->fif.fine_grain_ctrl)
+ return 0;
+
+ /*
+ * When all fan performance states contain no valid power data,
+ * the associated attribute should not be created.
+ */
+ for (i = 0; i < fan->fps_count; i++) {
+ if (acpi_fan_power_valid(fan->fps[i].power))
+ return 0444;
+ }
+
+ return 0;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ struct acpi_fan_fps *fps;
+ struct acpi_fan_fst fst;
+ int ret;
+
+ ret = acpi_fan_get_fst(fan->handle, &fst);
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ if (!acpi_fan_speed_valid(fst.speed))
+ return -ENODEV;
+
+ if (fst.speed > LONG_MAX)
+ return -EOVERFLOW;
+
+ *val = fst.speed;
+ return 0;
+ case hwmon_fan_target:
+ fps = acpi_fan_get_current_fps(fan, fst.control);
+ if (!fps)
+ return -EIO;
+
+ if (fps->speed > LONG_MAX)
+ return -EOVERFLOW;
+
+ *val = fps->speed;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ fps = acpi_fan_get_current_fps(fan, fst.control);
+ if (!fps)
+ return -EIO;
+
+ if (!acpi_fan_power_valid(fps->power))
+ return -ENODEV;
+
+ if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT)
+ return -EOVERFLOW;
+
+ *val = fps->power * MICROWATT_PER_MILLIWATT;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops acpi_fan_hwmon_ops = {
+ .is_visible = acpi_fan_hwmon_is_visible,
+ .read = acpi_fan_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const acpi_fan_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_TARGET),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
+ .ops = &acpi_fan_hwmon_ops,
+ .info = acpi_fan_hwmon_info,
+};
+
+void acpi_fan_notify_hwmon(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+
+ hwmon_notify_event(fan->hdev, hwmon_fan, hwmon_fan_input, 0);
+}
+
+int devm_acpi_fan_create_hwmon(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+
+ fan->hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan,
+ &acpi_fan_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(fan->hdev);
+}
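
The new fan_hwmon.c follows the standard hwmon_chip_info pattern: is_visible() decides per-attribute permissions up front, read() fetches values on demand, and devm_hwmon_device_register_with_info() ties the chip to the driver's lifetime. A stripped-down sketch of the same pattern for a single read-only fan channel (all my_* names are hypothetical):

#include <linux/hwmon.h>

static umode_t my_is_visible(const void *drvdata, enum hwmon_sensor_types type,
			     u32 attr, int channel)
{
	/* expose only fan1_input, read-only */
	if (type == hwmon_fan && attr == hwmon_fan_input)
		return 0444;
	return 0;
}

static int my_read(struct device *dev, enum hwmon_sensor_types type,
		   u32 attr, int channel, long *val)
{
	if (type == hwmon_fan && attr == hwmon_fan_input) {
		*val = 1200;	/* a real driver reads the RPM from hardware */
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops my_hwmon_ops = {
	.is_visible = my_is_visible,
	.read = my_read,
};

static const struct hwmon_channel_info * const my_hwmon_info[] = {
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
	NULL
};

static const struct hwmon_chip_info my_chip_info = {
	.ops = &my_hwmon_ops,
	.info = my_hwmon_info,
};

/* in probe:
 *	devm_hwmon_device_register_with_info(dev, "my_fan", drvdata,
 *					     &my_chip_info, NULL);
 */
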
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 7652515a6be1..3499f86c411e 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
.remove = acpi_hed_remove,
},
};
-module_acpi_driver(acpi_hed_driver);
+
+static int __init acpi_hed_driver_init(void)
+{
+ return acpi_bus_register_driver(&acpi_hed_driver);
+}
+subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 60c483836756..40f875b265a9 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -27,7 +27,6 @@ static inline void acpi_pci_link_init(void) {}
void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
-void acpi_int340x_thermal_init(void);
int acpi_sysfs_init(void);
void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
@@ -118,8 +117,9 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, void (*release)(struct device *));
int acpi_tie_acpi_dev(struct acpi_device *adev);
int acpi_device_add(struct acpi_device *device);
-int acpi_device_setup_files(struct acpi_device *dev);
+void acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
+extern const struct attribute_group *acpi_groups[];
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
bool acpi_device_is_enabled(const struct acpi_device *adev);
@@ -139,6 +139,7 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
+void acpi_power_resources_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
@@ -174,6 +175,12 @@ bool processor_physically_present(acpi_handle handle);
static inline void acpi_early_processor_control_setup(void) {}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
@@ -186,7 +193,6 @@ enum acpi_ec_event_state {
struct acpi_ec {
acpi_handle handle;
- acpi_handle address_space_handler_holder;
int gpe;
int irq;
unsigned long command_addr;
@@ -215,6 +221,8 @@ extern struct acpi_ec *first_ec;
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
+#ifdef CONFIG_ACPI_EC
+
void acpi_ec_init(void);
void acpi_ec_ecdt_probe(void);
void acpi_ec_dsdt_probe(void);
@@ -224,12 +232,36 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+void acpi_ec_register_opregions(struct acpi_device *adev);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
bool acpi_ec_dispatch_gpe(void);
#endif
+#else
+
+static inline void acpi_ec_init(void) {}
+static inline void acpi_ec_ecdt_probe(void) {}
+static inline void acpi_ec_dsdt_probe(void) {}
+static inline void acpi_ec_block_transactions(void) {}
+static inline void acpi_ec_unblock_transactions(void) {}
+static inline int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data)
+{
+ return -ENXIO;
+}
+static inline void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) {}
+static inline void acpi_ec_register_opregions(struct acpi_device *adev) {}
+
+static inline void acpi_ec_flush_work(void) {}
+static inline bool acpi_ec_dispatch_gpe(void)
+{
+ return false;
+}
+
+#endif
/*--------------------------------------------------------------------------
Suspend/Resume
@@ -303,6 +335,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
void acpi_mipi_scan_crs_csi2(void);
void acpi_mipi_init_crs_csi2_swnodes(void);
void acpi_mipi_crs_csi2_cleanup(void);
+#ifdef CONFIG_X86
bool acpi_graph_ignore_port(acpi_handle handle);
+#else
+static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
+#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 1687483ff319..d1595156c86a 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,7 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id;
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
@@ -300,6 +300,25 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
EXPORT_SYMBOL_GPL(acpi_irq_get);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ struct irq_fwspec_info info;
+ struct irq_fwspec fwspec;
+ unsigned long flags;
+
+ if (acpi_irq_parse_one(handle, index, &fwspec, &flags))
+ return NULL;
+
+ if (irq_populate_fwspec_info(&fwspec, &info))
+ return NULL;
+
+ if (!(info.flags & IRQ_FWSPEC_INFO_AFFINITY_VALID))
+ return NULL;
+
+ return info.affinity;
+}
+
/**
* acpi_set_irq_model - Setup the GSI irqdomain information
* @model: the value assigned to acpi_irq_model
@@ -307,12 +326,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*fn)(u32))
+ acpi_gsi_domain_disp_fn fn)
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
+/*
+ * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function
+ *
+ * Return the dispatcher function that computes the domain fwnode for
+ * a given GSI.
+ */
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void)
+{
+ return acpi_get_gsi_domain_id;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher);
+
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI transfer
* callback to fallback to arch specified implementation.
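
The new acpi_irq_get_affinity() parses the indexed interrupt out of _CRS into an irq_fwspec, asks the irqdomain layer to populate the fwspec info, and hands back the affinity mask only when the IRQ_FWSPEC_INFO_AFFINITY_VALID flag is set. A hypothetical consumer might look like this (my_pick_cpus and my_allowed_cpus are illustrative, not part of the patch):

static struct cpumask my_allowed_cpus;

static void my_pick_cpus(acpi_handle handle)
{
	/* affinity of the device's first _CRS interrupt, if reported */
	const struct cpumask *mask = acpi_irq_get_affinity(handle, 0);

	cpumask_copy(&my_allowed_cpus, mask ? mask : cpu_possible_mask);
}
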
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index d05413a0672a..5b85989f96be 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -624,8 +624,7 @@ static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
if (!fwnode_property_present(adev_fwnode, "rotation")) {
struct acpi_pld_info *pld;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_get_physical_device_location(handle, &pld)) {
swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
PROPERTY_ENTRY_U32("rotation",
pld->rotation * 45U);
@@ -725,14 +724,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
acpi_mipi_del_crs_csi2(csi2);
}
-static const struct dmi_system_id dmi_ignore_port_nodes[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
- },
- },
- { }
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
+static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ {}
};
static const char *strnext(const char *s1, const char *s2)
@@ -761,7 +766,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
static bool dmi_tested, ignore_port;
if (!dmi_tested) {
- ignore_port = dmi_first_match(dmi_ignore_port_nodes);
+ if (dmi_name_in_vendors("Dell Inc.") &&
+ x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
+ ignore_port = true;
+
dmi_tested = true;
}
@@ -794,3 +802,4 @@ out_free:
kfree(orig_path);
return false;
}
+#endif
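
The mipi-disco-img.c hunk replaces a DMI match on one product (XPS 9315) with a vendor check plus a CPU-generation table, which covers every Dell model built on the affected Intel generations without listing each one. The matching pattern generalizes; a sketch with a hypothetical table:

#include <linux/dmi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id my_affected_cpus[] = {
	X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),	/* vendor/family/model */
	{}					/* terminator */
};

static bool my_platform_is_affected(void)
{
	return dmi_name_in_vendors("Dell Inc.") &&
	       x86_match_cpu(my_affected_cpus);
}
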
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d4595d1985b1..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc)
*cmd_rc = -EINVAL;
- if (cmd == ND_CMD_CALL)
+ if (cmd == ND_CMD_CALL) {
+ if (!buf || buf_len < sizeof(*call_pkg))
+ return -EINVAL;
+
call_pkg = buf;
+ }
+
func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;
@@ -480,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
- if (family > NVDIMM_BUS_FAMILY_MAX ||
+ if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
!test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
family = array_index_nospec(family,
@@ -2632,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
@@ -3531,5 +3536,6 @@ static __exit void nfit_exit(void)
module_init(nfit_init);
module_exit(nfit_exit);
+MODULE_DESCRIPTION("ACPI NVDIMM Firmware Interface Table (NFIT) driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 3902759abcba..bce6f6a18426 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -55,10 +55,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned long security_flags = 0;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_get_security_state cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -120,10 +119,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
static int intel_security_freeze(struct nvdimm *nvdimm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_freeze_lock cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -153,10 +151,9 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
NVDIMM_INTEL_SET_PASSPHRASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_set_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
@@ -195,10 +192,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_unlock_unit cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -234,10 +230,9 @@ static int intel_security_disable(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_disable_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -277,10 +272,9 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_secure_erase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
@@ -318,10 +312,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_query_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -354,10 +347,9 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -407,10 +399,9 @@ const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
struct nd_intel_bus_fw_activate_businfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate_businfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
.nd_family = NVDIMM_BUS_FAMILY_INTEL,
@@ -518,33 +509,31 @@ static enum nvdimm_fwa_capability intel_bus_fwa_capability(
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
- .nd_family = NVDIMM_BUS_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
- .nd_size_out =
- sizeof(struct nd_intel_bus_fw_activate),
- .nd_fw_size =
- sizeof(struct nd_intel_bus_fw_activate),
- },
+ ) nd_cmd;
+ int rc;
+
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ };
+ nd_cmd.cmd = (struct nd_intel_bus_fw_activate) {
/*
* Even though activate is run from a suspended context,
* for safety, still ask platform firmware to force
* quiesce devices by default. Let a module
* parameter override that policy.
*/
- .cmd = {
- .iodev_state = acpi_desc->fwa_noidle
- ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
- : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
- },
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
};
- int rc;
-
switch (intel_bus_fwa_state(nd_desc)) {
case NVDIMM_FWA_ARMED:
case NVDIMM_FWA_ARM_OVERFLOW:
@@ -582,10 +571,9 @@ const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
struct nd_intel_fw_activate_dimminfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_dimminfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -688,27 +676,24 @@ static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_arm cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
- .nd_family = NVDIMM_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
- .nd_size_out =
- sizeof(struct nd_intel_fw_activate_arm),
- .nd_fw_size =
- sizeof(struct nd_intel_fw_activate_arm),
- },
- .cmd = {
- .activate_arm = arm == NVDIMM_FWA_ARM
- ? ND_INTEL_DIMM_FWA_ARM
- : ND_INTEL_DIMM_FWA_DISARM,
- },
- };
+ ) nd_cmd;
int rc;
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out = sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size = sizeof(struct nd_intel_fw_activate_arm),
+ };
+ nd_cmd.cmd = (struct nd_intel_fw_activate_arm) {
+ .activate_arm = arm == NVDIMM_FWA_ARM ?
+ ND_INTEL_DIMM_FWA_ARM :
+ ND_INTEL_DIMM_FWA_DISARM,
+ };
+
switch (intel_fwa_state(nvdimm)) {
case NVDIMM_FWA_INVALID:
return -ENXIO;
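
Every conversion in nfit/intel.c above swaps an open-coded { header; payload; } wrapper for TRAILING_OVERLAP(), so the payload aliases the header's flexible nd_payload[] array rather than being placed after a struct that ends in a flexible array member (which -Wflexible-array-member-not-at-end flags). The likely shape of such a helper, sketched as an assumption rather than the verbatim kernel macro:

/*
 * Assumed idea behind TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS): pad a
 * sibling struct out to offsetof(TYPE, FAM) so MEMBERS overlay the
 * flexible array. Hand-expanded for the freeze-lock case:
 */
union {
	struct nd_cmd_pkg pkg;		/* header ending in nd_payload[] */
	struct {
		u8 __pad[offsetof(struct nd_cmd_pkg, nd_payload)];
		struct nd_intel_freeze_lock cmd;	/* overlays nd_payload[] */
	};
} nd_cmd;

Note also that intel_bus_fwa_activate() and intel_fwa_arm() switch from one designated initializer to member-by-member assignment, since overlapping union members cannot be initialized together.
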
diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig
index 849c2bd820b9..f33194d1e43f 100644
--- a/drivers/acpi/numa/Kconfig
+++ b/drivers/acpi/numa/Kconfig
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config ACPI_NUMA
- bool "NUMA support"
- depends on NUMA
- depends on (X86 || ARM64 || LOONGARCH)
- default y if ARM64
+ def_bool NUMA && !X86
config ACPI_HMAT
bool "ACPI Heterogeneous Memory Attribute Table Support"
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 2c8ccc91ebe6..77a81627aaef 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -74,7 +74,6 @@ struct memory_target {
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
- bool ext_updated; /* externally updated */
};
struct memory_initiator {
@@ -108,6 +107,45 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *cache_size)
+{
+ unsigned int pxm = node_to_pxm(nid);
+ struct memory_target *target;
+ struct target_cache *tcache;
+ struct resource *res;
+
+ target = find_mem_target(pxm);
+ if (!target)
+ return -ENOENT;
+
+ list_for_each_entry(tcache, &target->caches, node) {
+ if (tcache->cache_attrs.address_mode !=
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+ continue;
+
+ res = &target->memregions;
+ if (!resource_contains(res, backing_res))
+ continue;
+
+ *cache_size = tcache->cache_attrs.size;
+ return 0;
+ }
+
+ *cache_size = 0;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
static struct memory_target *acpi_find_genport_target(u32 uid)
{
struct memory_target *target;
@@ -151,7 +189,7 @@ int acpi_get_genport_coordinates(u32 uid,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
@@ -329,35 +367,6 @@ static void hmat_update_target_access(struct memory_target *target,
}
}
-int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
- enum access_coordinate_class access)
-{
- struct memory_target *target;
- int pxm;
-
- if (nid == NUMA_NO_NODE)
- return -EINVAL;
-
- pxm = node_to_pxm(nid);
- guard(mutex)(&target_lock);
- target = find_mem_target(pxm);
- if (!target)
- return -ENODEV;
-
- hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
- coord->read_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
- coord->write_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
- coord->read_bandwidth, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
- coord->write_bandwidth, access);
- target->ext_updated = true;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
-
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
struct memory_locality *loc;
@@ -408,7 +417,7 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px
if (target && target->processor_pxm == init_pxm) {
hmat_update_target_access(target, type, value,
ACCESS_COORDINATE_LOCAL);
- /* If the node has a CPU, update access 1 */
+ /* If the node has a CPU, update access ACCESS_COORDINATE_CPU */
if (node_state(pxm_to_node(init_pxm), N_CPU))
hmat_update_target_access(target, type, value,
ACCESS_COORDINATE_CPU);
@@ -442,9 +451,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
return -EINVAL;
}
- pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
- hmat_loc->flags, hmat_data_type(type), ipds, tpds,
- hmat_loc->entry_base_unit);
+ pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
inits = (u32 *)(hmat_loc + 1);
targs = inits + ipds;
@@ -455,9 +464,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%u-%u]:%u%s\n",
- inits[init], targs[targ], value,
- hmat_data_type_suffix(type));
+ pr_debug(" Initiator-Target[%u-%u]:%u%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
@@ -485,9 +494,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
}
attrs = cache->cache_attributes;
- pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
- cache->memory_PD, cache->cache_size, attrs,
- cache->number_of_SMBIOShandles);
+ pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
target = find_mem_target(cache->memory_PD);
if (!target)
@@ -506,6 +515,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ /* Extended Linear mode is only valid if cache is direct mapped */
+ if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+ tcache->cache_attrs.address_mode =
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+ }
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
@@ -546,9 +560,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
}
if (hmat_revision == 1)
- pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
- p->reserved3, p->reserved4, p->flags, p->processor_PD,
- p->memory_PD);
+ pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
else
pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
@@ -729,10 +743,6 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
- /* Don't update if an external agent has changed the data. */
- if (target->ext_updated)
- return;
-
/* Don't update for generic port if there's no device handle */
if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
@@ -864,11 +874,33 @@ static void hmat_register_target_devices(struct memory_target *target)
}
}
-static void hmat_register_target(struct memory_target *target)
+static void hmat_hotplug_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
+ * "specific purpose", is applied to all the memory in a proximity
+ * domain leading to * the node being marked offline / unplugged, or if
+ * memory-only "hotplug" node is offline.
+ */
+ if (nid == NUMA_NO_NODE || !node_online(nid))
+ return;
+
+ guard(mutex)(&target_lock);
+ if (target->registered)
+ return;
+
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+ target->registered = true;
+}
+
+static void hmat_register_target(struct memory_target *target)
+{
+ /*
* Devices may belong to either an offline or online
* node, so unconditionally add them.
*/
@@ -878,32 +910,15 @@ static void hmat_register_target(struct memory_target *target)
* Register generic port perf numbers. The nid may not be
* initialized and is still NUMA_NO_NODE.
*/
- mutex_lock(&target_lock);
- if (*(u16 *)target->gen_port_device_handle) {
- hmat_update_generic_target(target);
- target->registered = true;
+ scoped_guard(mutex, &target_lock) {
+ if (*(u16 *)target->gen_port_device_handle) {
+ hmat_update_generic_target(target);
+ target->registered = true;
+ return;
+ }
}
- mutex_unlock(&target_lock);
- /*
- * Skip offline nodes. This can happen when memory
- * marked EFI_MEMORY_SP, "specific purpose", is applied
- * to all the memory in a proximity domain leading to
- * the node being marked offline / unplugged, or if
- * memory-only "hotplug" node is offline.
- */
- if (nid == NUMA_NO_NODE || !node_online(nid))
- return;
-
- mutex_lock(&target_lock);
- if (!target->registered) {
- hmat_register_target_initiators(target);
- hmat_register_target_cache(target);
- hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
- hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
- target->registered = true;
- }
- mutex_unlock(&target_lock);
+ hmat_hotplug_target(target);
}
static void hmat_register_targets(void)
@@ -918,10 +933,10 @@ static int hmat_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
struct memory_target *target;
- struct memory_notify *mnb = arg;
- int pxm, nid = mnb->status_change_nid;
+ struct node_notify *nn = arg;
+ int pxm, nid = nn->nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_OK;
pxm = node_to_pxm(nid);
@@ -929,26 +944,23 @@ static int hmat_callback(struct notifier_block *self,
if (!target)
return NOTIFY_OK;
- hmat_register_target(target);
+ hmat_hotplug_target(target);
return NOTIFY_OK;
}
-static int hmat_set_default_dram_perf(void)
+static int __init hmat_set_default_dram_perf(void)
{
int rc;
int nid, pxm;
struct memory_target *target;
struct access_coordinate *attrs;
- if (!default_dram_type)
- return -EIO;
-
- for_each_node_mask(nid, default_dram_type->nodes) {
+ for_each_node_mask(nid, default_dram_nodes) {
pxm = node_to_pxm(nid);
target = find_mem_target(pxm);
if (!target)
continue;
- attrs = &target->coord[1];
+ attrs = &target->coord[ACCESS_COORDINATE_CPU];
rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
if (rc)
return rc;
@@ -975,7 +987,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
mutex_unlock(&target_lock);
- perf = &target->coord[1];
+ perf = &target->coord[ACCESS_COORDINATE_CPU];
if (mt_perf_to_adistance(perf, adist))
return NOTIFY_OK;
@@ -1077,7 +1089,7 @@ static __init int hmat_init(void)
hmat_register_targets();
/* Keep the table and structures if the notifier may use them */
- if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
+ if (hotplug_node_notifier(hmat_callback, HMAT_CALLBACK_PRI))
goto out_put;
if (!hmat_set_default_dram_perf())
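
The hmat.c locking changes convert open-coded mutex_lock()/mutex_unlock() pairs to the <linux/cleanup.h> guards: guard(mutex)(&lock) holds the lock until the enclosing function scope ends, while scoped_guard(mutex, &lock) { ... } limits it to an explicit block. A self-contained sketch (my_lock and my_flag are illustrative):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static bool my_flag;

static int my_update(void)
{
	guard(mutex)(&my_lock);	/* dropped automatically on any return */

	if (!my_flag)
		return -ENODEV;	/* no explicit mutex_unlock() needed */
	return 0;
}

static void my_other_path(void)
{
	scoped_guard(mutex, &my_lock) {
		my_flag = true;	/* lock held only inside this block */
	}
	/* lock already released here */
}

This is what lets hmat_hotplug_target() return early from under target_lock without an unlock label.
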
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index e3f26e71637a..aa87ee1583a4 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,9 +14,12 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
+#include <linux/numa_memblks.h>
+#include <linux/string_choices.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
@@ -50,6 +53,7 @@ int node_to_pxm(int node)
return PXM_INVAL;
return node_to_pxm_map[node];
}
+EXPORT_SYMBOL_GPL(node_to_pxm);
static void __acpi_map_pxm_to_node(int pxm, int node)
{
@@ -80,6 +84,101 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
+#ifdef CONFIG_NUMA_EMU
+/*
+ * Take max_nid - 1 fake-numa nodes into account in both
+ * pxm_to_node_map()/node_to_pxm_map[] tables.
+ */
+int __init fix_pxm_node_maps(int max_nid)
+{
+ static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
+ static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ int i, j, index = -1, count = 0;
+ nodemask_t nodes_to_enable;
+
+ if (numa_off)
+ return -1;
+
+ /* no or incomplete node/PXM mapping set, nothing to do */
+ if (srat_disabled())
+ return 0;
+
+ /* find fake nodes PXM mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++) {
+ if ((emu_nid_to_phys[j] == i) &&
+ WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
+ "Node %d is already binded to PXM %d\n",
+ j, node_to_pxm_map_copy[j]))
+ return -1;
+ if (emu_nid_to_phys[j] == i) {
+ node_to_pxm_map_copy[j] =
+ node_to_pxm_map[i];
+ if (j > index)
+ index = j;
+ count++;
+ }
+ }
+ }
+ }
+ if (index == -1) {
+ pr_debug("No node/PXM mapping has been set\n");
+ /* nothing more to be done */
+ return 0;
+ }
+ if (WARN(index != max_nid, "%d max nid when expected %d\n",
+ index, max_nid))
+ return -1;
+
+ nodes_clear(nodes_to_enable);
+
+ /* map phys nodes not used for fake nodes */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++)
+ if (emu_nid_to_phys[j] == i)
+ break;
+ /* fake nodes PXM mapping has been done */
+ if (j <= max_nid)
+ continue;
+ /* find first hole */
+ for (j = 0;
+ j < MAX_NUMNODES &&
+ node_to_pxm_map_copy[j] != PXM_INVAL;
+ j++)
+ ;
+ if (WARN(j == MAX_NUMNODES,
+ "Number of nodes exceeds MAX_NUMNODES\n"))
+ return -1;
+ node_to_pxm_map_copy[j] = node_to_pxm_map[i];
+ node_set(j, nodes_to_enable);
+ count++;
+ }
+ }
+
+ /* creating reverse mapping in pxm_to_node_map[] */
+ for (i = 0; i < MAX_NUMNODES; i++)
+ if (node_to_pxm_map_copy[i] != PXM_INVAL &&
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;
+
+ /* overwrite with new mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ node_to_pxm_map[i] = node_to_pxm_map_copy[i];
+ pxm_to_node_map[i] = pxm_to_node_map_copy[i];
+ }
+
+ /* enable other nodes found in PXM for hotplug */
+ nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);
+
+ pr_debug("found %d total number of nodes\n", count);
+ return 0;
+}
+#endif
+
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
@@ -91,8 +190,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
p->apic_id, p->local_sapic_eid,
p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -104,8 +202,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(unsigned long long)p->base_address,
(unsigned long long)p->length,
p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED) ?
- "enabled" : "disabled",
+ str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED),
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
" hot-pluggable" : "",
(p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
@@ -120,8 +217,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
p->apic_id,
p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -132,8 +228,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_GICC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED));
}
break;
@@ -142,7 +237,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_generic_affinity *p =
(struct acpi_srat_generic_affinity *)header;
- if (p->device_handle_type == 0) {
+ if (p->device_handle_type == 1) {
/*
* For pci devices this may be the only place they
* are assigned a proximity domain
@@ -151,8 +246,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
*(u16 *)(&p->device_handle[0]),
*(u16 *)(&p->device_handle[2]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
} else {
/*
* In this case we can rely on the device having a
@@ -162,11 +256,22 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(char *)(&p->device_handle[0]),
(char *)(&p->device_handle[8]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
}
}
break;
+
+ case ACPI_SRAT_TYPE_RINTC_AFFINITY:
+ {
+ struct acpi_srat_rintc_affinity *p =
+ (struct acpi_srat_rintc_affinity *)header;
+ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED));
+ }
+ break;
+
default:
pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
header->type);
@@ -325,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -349,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
@@ -450,6 +565,21 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
+static int __init
+acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_rintc_affinity *rintc_affinity;
+
+ rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
+ acpi_table_print_srat_entry(&header->common);
+
+ /* let the architecture-dependent part handle it */
+ acpi_numa_rintc_affinity_init(rintc_affinity);
+
+ return 0;
+}
+
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
@@ -485,7 +615,7 @@ int __init acpi_numa_init(void)
/* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- struct acpi_subtable_proc srat_proc[4];
+ struct acpi_subtable_proc srat_proc[5];
memset(srat_proc, 0, sizeof(srat_proc));
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
@@ -496,6 +626,8 @@ int __init acpi_numa_init(void)
srat_proc[2].handler = acpi_parse_gicc_affinity;
srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
srat_proc[3].handler = acpi_parse_gi_affinity;
+ srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
+ srat_proc[4].handler = acpi_parse_rintc_affinity;
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
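
The CFMWS hunk in srat.c derives the largest power of two that divides both the window base and its end with align = 1UL << __ffs(start | end), and only advises the memblock size when that alignment is at least 256 MiB. A worked example with hypothetical firmware values:

/* base_hpa = 4 GiB, window_size = 2 GiB (illustrative values): */
u64 start = 0x100000000ULL;
u64 end   = start + 0x80000000ULL;		/* 0x180000000 */
u64 align = 1ULL << __ffs(start | end);		/* lowest set bit: bit 31 */
/*
 * align == 0x80000000 (2 GiB) >= SZ_256M, so the advice is issued;
 * anything below 256 MiB trips the [BIOS BUG] spec-violation warning.
 */
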
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index df9328c850bd..f2c943b934be 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -42,7 +42,6 @@ static struct acpi_osi_entry
osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
- {"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
};
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 70af3fbbebe5..05393a7315fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -398,7 +398,7 @@ static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
list_del_rcu(&map->list);
INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
- queue_rcu_work(system_wq, &map->track.rwork);
+ queue_rcu_work(system_percpu_wq, &map->track.rwork);
}
/**
@@ -607,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- msleep(ms);
+ u64 usec = ms * USEC_PER_MSEC, delta_us = 50;
+
+ /*
+ * Use a hrtimer because the timer wheel timers are optimized for
+ * cancellation before they expire and this timer is not going to be
+ * canceled.
+ *
+ * Set the delta between the requested sleep time and the effective
+ * deadline to at least 50 us in case there is an opportunity for timer
+ * coalescing.
+ *
+ * Moreover, longer sleeps can be assumed to need somewhat less timer
+ * precision, so sacrifice some of it for making the timer a more likely
+ * candidate for coalescing by setting the delta to 1% of the sleep time
+ * if it is above 5 ms (this value is chosen so that the delta is a
+ * continuous function of the sleep time).
+ */
+ if (ms > 5)
+ delta_us = (USEC_PER_MSEC / 100) * ms;
+
+ usleep_range(usec, usec + delta_us);
}
void acpi_os_stall(u32 us)
@@ -642,6 +662,15 @@ acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ /*
+ * Return an all-ones result, as if reading from a
+ * non-existent I/O port.
+ */
+ *value = GENMASK(width, 0);
+ return AE_NOT_IMPLEMENTED;
+ }
+
if (value)
*value = 0;
else
@@ -665,6 +694,9 @@ EXPORT_SYMBOL(acpi_os_read_port);
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT))
+ return AE_NOT_IMPLEMENTED;
+
if (width <= 8) {
outb(value, port);
} else if (width <= 16) {
@@ -1662,8 +1694,8 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = alloc_workqueue("kacpid", 0, 1);
- kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
+ kacpid_wq = alloc_workqueue("kacpid", WQ_PERCPU, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", WQ_PERCPU, 0);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
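
The new acpi_os_sleep() above trades msleep()'s timer-wheel granularity for a usleep_range() window sized for timer coalescing: at least 50 us of slack, growing to 1% of the sleep time above 5 ms. The two regimes meet exactly at the boundary, since 1% of 5 ms is 50 us. Worked values:

/* ms =  2 -> usec =  2000, delta =  50 -> usleep_range(2000, 2050)
 * ms =  5 -> usec =  5000, delta =  50 -> usleep_range(5000, 5050)
 * ms = 20 -> usec = 20000, delta = 200 -> usleep_range(20000, 20200)
 */
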
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index ff30ceca2203..ad81aa03fe2f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string_choices.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
@@ -288,7 +289,7 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
}
#endif /* CONFIG_X86_IO_APIC */
-static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry = NULL;
struct pci_dev *bridge;
@@ -468,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq);
kfree(entry);
return 0;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index aa1038b8aec4..bed7dc85612e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -268,7 +268,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
link->irq.active = irq;
- acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active);
+ acpi_handle_debug(handle, "Link at IRQ %d\n", link->irq.active);
end:
return result;
@@ -714,8 +714,8 @@ static int acpi_pci_link_add(struct acpi_device *device,
return -ENOMEM;
link->device = device;
- strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
device->driver_data = link;
mutex_lock(&acpi_link_lock);
@@ -748,6 +748,8 @@ static int acpi_pci_link_add(struct acpi_device *device,
if (result)
kfree(link);
+ acpi_dev_clear_dependencies(device);
+
return result < 0 ? result : 1;
}
@@ -759,7 +761,7 @@ static int acpi_pci_link_resume(struct acpi_pci_link *link)
return 0;
}
-static void irqrouter_resume(void)
+static void irqrouter_resume(void *data)
{
struct acpi_pci_link *link;
@@ -886,10 +888,14 @@ static int __init acpi_irq_balance_set(char *str)
__setup("acpi_irq_balance", acpi_irq_balance_set);
-static struct syscore_ops irqrouter_syscore_ops = {
+static const struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
+static struct syscore irqrouter_syscore = {
+ .ops = &irqrouter_syscore_ops,
+};
+
void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
@@ -902,6 +908,6 @@ void __init acpi_pci_link_init(void)
else
acpi_irq_balance = 0;
}
- register_syscore_ops(&irqrouter_syscore_ops);
+ register_syscore(&irqrouter_syscore);
acpi_scan_add_handler(&pci_link_handler);
}
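
pci_link.c above is converted to the newer syscore registration scheme: the ops table becomes const, it is wrapped in a non-const struct syscore, and the resume callback gains a void *data argument. A sketch mirroring the pattern in the diff (my_* names are hypothetical):

static void my_resume(void *data)
{
	/* data is whatever the registrant associates; unused here */
}

static const struct syscore_ops my_syscore_ops = {
	.resume = my_resume,
};

static struct syscore my_syscore = {
	.ops = &my_syscore_ops,
};

/* at init time: */
register_syscore(&my_syscore);
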
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 860014b89b8e..58e10a980114 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -181,6 +181,18 @@ static struct mcfg_fixup mcfg_quirks[] = {
LOONGSON_ECAM_MCFG("LOONGSON", 0),
LOONGSON_ECAM_MCFG("\0", 1),
LOONGSON_ECAM_MCFG("LOONGSON", 1),
+ LOONGSON_ECAM_MCFG("\0", 2),
+ LOONGSON_ECAM_MCFG("LOONGSON", 2),
+ LOONGSON_ECAM_MCFG("\0", 3),
+ LOONGSON_ECAM_MCFG("LOONGSON", 3),
+ LOONGSON_ECAM_MCFG("\0", 4),
+ LOONGSON_ECAM_MCFG("LOONGSON", 4),
+ LOONGSON_ECAM_MCFG("\0", 5),
+ LOONGSON_ECAM_MCFG("LOONGSON", 5),
+ LOONGSON_ECAM_MCFG("\0", 6),
+ LOONGSON_ECAM_MCFG("LOONGSON", 6),
+ LOONGSON_ECAM_MCFG("\0", 7),
+ LOONGSON_ECAM_MCFG("LOONGSON", 7),
#endif /* LOONGARCH */
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 58b89b8d950e..74ade4160314 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -293,11 +293,6 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_pci_find_root);
-struct acpi_handle_node {
- struct list_head node;
- acpi_handle handle;
-};
-
/**
* acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
* @handle: the handle in question
@@ -694,8 +689,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->device = device;
root->segment = segment & 0xFFFF;
- strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
if (hotadd && dmar_device_add(handle)) {
@@ -863,7 +858,7 @@ next:
}
}
-static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
+static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode,
struct resource_entry *entry)
{
#ifdef PCI_IOBASE
@@ -1008,7 +1003,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
int node = acpi_get_node(device->handle);
struct pci_bus *bus;
struct pci_host_bridge *host_bridge;
- union acpi_object *obj;
info->root = root;
info->bridge = device;
@@ -1050,17 +1044,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL))
host_bridge->native_cxl_error = 0;
- /*
- * Evaluate the "PCI Boot Configuration" _DSM Function. If it
- * exists and returns 0, we must preserve any PCI resource
- * assignments made by firmware for this host bridge.
- */
- obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
- DSM_PCI_PRESERVE_BOOT_CONFIG, NULL, ACPI_TYPE_INTEGER);
- if (obj && obj->integer.value == 0)
- host_bridge->preserve_config = 1;
- ACPI_FREE(obj);
-
acpi_dev_power_up_children_with_adr(device);
pci_scan_child_bus(bus);
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index 998264a7333d..32bdf8cbe8f2 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -272,9 +272,6 @@ static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case PFRT_LOG_IOC_GET_INFO:
info.log_level = get_pfrt_log_level(pfrt_log_dev);
- if (ret < 0)
- return ret;
-
info.log_type = pfrt_log_dev->info.log_type;
info.log_revid = pfrt_log_dev->info.log_revid;
if (copy_to_user(p, &info, sizeof(info)))
@@ -425,7 +422,7 @@ static struct platform_driver acpi_pfrt_log_driver = {
.acpi_match_table = acpi_pfrt_log_ids,
},
.probe = acpi_pfrt_log_probe,
- .remove_new = acpi_pfrt_log_remove,
+ .remove = acpi_pfrt_log_remove,
};
module_platform_driver(acpi_pfrt_log_driver);
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 8b2910995fc1..11b1c2828005 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -127,8 +127,11 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
pfru_dev->rev_id,
PFRU_FUNC_QUERY_UPDATE_CAP,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with no object\n");
return ret;
+ }
if (out_obj->package.count < CAP_NR_IDX ||
out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -141,13 +144,17 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
- out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
+ out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
if (cap_hdr->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
+ dev_dbg(pfru_dev->parent_dev, "Query cap Error Status:%d\n",
+ cap_hdr->status);
goto free_acpi_buffer;
}
@@ -193,24 +200,32 @@ static int query_buffer(struct pfru_com_buf_info *info,
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with no object\n");
return ret;
+ }
if (out_obj->package.count < BUF_NR_IDX ||
out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
info->ext_status =
out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
if (info->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Status:%d\n", info->status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Extended Status:%d\n", info->ext_status);
goto free_acpi_buffer;
}
@@ -295,12 +310,16 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
m_img_hdr = data + size;
type = get_image_type(m_img_hdr, pfru_dev);
- if (type < 0)
+ if (type < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image type\n");
return false;
+ }
size = adjust_efi_size(m_img_hdr, size);
- if (size < 0)
+ if (size < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image size\n");
return false;
+ }
auth = data + size;
size += sizeof(u64) + auth->auth_info.hdr.len;
@@ -310,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
if (type == PFRU_CODE_INJECT_TYPE)
return payload_hdr->rt_ver >= cap->code_rt_version;
- return payload_hdr->rt_ver >= cap->drv_rt_version;
+ return payload_hdr->svn_ver >= cap->drv_svn;
}
static void print_update_debug_info(struct pfru_updated_result *result,
@@ -346,8 +365,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_START,
&in_obj, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed to start with no object\n");
return ret;
+ }
if (out_obj->package.count < UPDATE_NR_IDX ||
out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -355,8 +377,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
update_result.status =
out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
@@ -365,8 +390,10 @@ static int start_update(int action, struct pfru_device *pfru_dev)
if (update_result.status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Status:%d\n", update_result.status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Extended Status:%d\n",
update_result.ext_status);
goto free_acpi_buffer;
@@ -450,8 +477,10 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
if (ret)
return ret;
- if (len > buf_info.buf_size)
+ if (len > buf_info.buf_size) {
+ dev_dbg(pfru_dev->parent_dev, "Capsule image size too large\n");
return -EINVAL;
+ }
iov.iov_base = (void __user *)buf;
iov.iov_len = len;
@@ -460,10 +489,14 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
/* map the communication buffer */
phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
- if (!buf_ptr)
+ if (!buf_ptr) {
+ dev_dbg(pfru_dev->parent_dev, "Failed to remap the buffer\n");
return -ENOMEM;
+ }
if (!copy_from_iter_full(buf_ptr, len, &iter)) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Failed to copy the data from the user space buffer\n");
ret = -EINVAL;
goto unmap;
}
@@ -565,7 +598,7 @@ static struct platform_driver acpi_pfru_driver = {
.acpi_match_table = acpi_pfru_ids,
},
.probe = acpi_pfru_probe,
- .remove_new = acpi_pfru_remove,
+ .remove = acpi_pfru_remove,
};
module_platform_driver(acpi_pfru_driver);
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 4a9704730224..ea04a8c69215 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -2,16 +2,34 @@
/* Platform profile sysfs interface */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
-static struct platform_profile_handler *cur_profile;
+#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev))
+
static DEFINE_MUTEX(profile_lock);
+struct platform_profile_handler {
+ const char *name;
+ struct device dev;
+ int minor;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ const struct platform_profile_ops *ops;
+};
+
+struct aggregate_choices_data {
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int count;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -19,202 +37,682 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_MAX_POWER] = "max-power",
+ [PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0;
- int err, i;
-
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+static DEFINE_IDA(platform_profile_ida);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+/**
+ * _common_choices_show - Show the available profile choices
+ * @choices: The available profile choices
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t _common_choices_show(unsigned long *choices, char *buf)
+{
+ int i, len = 0;
- for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
- mutex_unlock(&profile_lock);
+
return len;
}
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * _store_class_profile - Set the profile for a class device
+ * @dev: The class device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler;
+ int *bit = (int *)data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
+ return -EOPNOTSUPP;
+
+ return handler->ops->profile_set(dev, *bit);
+}
+
+/**
+ * _notify_class_profile - Notify the class device of a profile change
+ * @dev: The class device
+ * @data: Unused
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _notify_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ lockdep_assert_held(&profile_lock);
+ sysfs_notify(&handler->dev.kobj, NULL, "profile");
+ kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * get_class_profile - Show the current profile for a class device
+ * @dev: The class device
+ * @profile: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int get_class_profile(struct device *dev,
+ enum platform_profile_option *profile)
{
- enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ struct platform_profile_handler *handler;
+ enum platform_profile_option val;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ err = handler->ops->profile_get(dev, &val);
+ if (err) {
+ pr_err("Failed to get profile for handler %s\n", handler->name);
return err;
+ }
+
+ if (WARN_ON(val >= PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+ *profile = val;
+
+ return 0;
+}
+
+/**
+ * name_show - Show the name of the profile handler
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return sysfs_emit(buf, "%s\n", handler->name);
+}
+static DEVICE_ATTR_RO(name);
+
+/**
+ * choices_show - Show the available profile choices
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return _common_choices_show(handler->choices, buf);
+}
+static DEVICE_ATTR_RO(choices);
+
+/**
+ * profile_show - Show the current profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = get_class_profile(dev, &profile);
+ if (err)
+ return err;
}
- err = cur_profile->profile_get(cur_profile, &profile);
- mutex_unlock(&profile_lock);
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * profile_store - Set the profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index, ret;
+
+ index = sysfs_match_string(profile_names, buf);
+ if (index < 0)
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = _store_class_profile(dev, &index);
+ if (ret)
+ return ret;
+ }
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return count;
+}
+static DEVICE_ATTR_RW(profile);
+
+static struct attribute *profile_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_choices.attr,
+ &dev_attr_profile.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(profile);
+
+static void pprof_device_release(struct device *dev)
+{
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+
+ kfree(pprof);
+}
+
+static const struct class platform_profile_class = {
+ .name = "platform-profile",
+ .dev_groups = profile_groups,
+ .dev_release = pprof_device_release,
+};
+
+/**
+ * _aggregate_choices - Aggregate the available profile choices
+ * @dev: The device
+ * @arg: struct aggregate_choices_data, with its aggregate member bitmap
+ * initially filled with ones
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_choices(struct device *dev, void *arg)
+{
+ unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+
+ handler = to_pprof_handler(dev);
+ bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ data->count++;
+
+ return 0;
+}
+
+/**
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_andnot(data->aggregate, handler->choices,
+ handler->hidden_choices, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @kobj: The kobject
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_choices_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (err)
+ return err;
+ if (data.count == 1) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _remove_hidden_choices);
+ if (err)
+ return err;
+ }
+ }
+
+ /* no profile handler registered anymore */
+ if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+
+ return _common_choices_show(data.aggregate, buf);
+}
+
+/**
+ * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_profiles(struct device *dev, void *data)
+{
+ enum platform_profile_option *profile = data;
+ enum platform_profile_option val;
+ int err;
+
+ err = get_class_profile(dev, &val);
if (err)
return err;
- /* Check that profile is valid index */
- if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
- return -EIO;
+ if (*profile != PLATFORM_PROFILE_LAST && *profile != val)
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ else
+ *profile = val;
- return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ return 0;
}
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+/**
+ * _store_and_notify - Set the profile on a class device and notify it
+ * @dev: The device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_and_notify(struct device *dev, void *data)
{
- int err, i;
+ enum platform_profile_option *profile = data;
+ int err;
- err = mutex_lock_interruptible(&profile_lock);
+ err = _store_class_profile(dev, profile);
if (err)
return err;
+ return _notify_class_profile(dev, NULL);
+}
+
+/**
+ * platform_profile_show - Show the current profile for legacy sysfs interface
+ * @kobj: The kobject
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
}
+ /* no profile handler registered anymore */
+ if (profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * platform_profile_store - Set the profile for legacy sysfs interface
+ * @kobj: The kobject
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t platform_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ int ret;
+ int i;
+
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
- if (i < 0) {
- mutex_unlock(&profile_lock);
+ if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- }
- /* Check that platform supports this profile choice */
- if (!test_bit(i, cur_profile->choices)) {
- mutex_unlock(&profile_lock);
- return -EOPNOTSUPP;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (ret)
+ return ret;
+ if (!test_bit(i, data.aggregate))
+ return -EOPNOTSUPP;
+
+ ret = class_for_each_device(&platform_profile_class, NULL, &i,
+ _store_and_notify);
+ if (ret)
+ return ret;
}
- err = cur_profile->profile_set(cur_profile, i);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- mutex_unlock(&profile_lock);
- if (err)
- return err;
return count;
}
-static DEVICE_ATTR_RO(platform_profile_choices);
-static DEVICE_ATTR_RW(platform_profile);
+static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
+static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
- &dev_attr_platform_profile_choices.attr,
- &dev_attr_platform_profile.attr,
+ &attr_platform_profile_choices.attr,
+ &attr_platform_profile.attr,
NULL
};
+static int profile_class_registered(struct device *dev, const void *data)
+{
+ return 1;
+}
+
+static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev;
+
+ dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered);
+ if (!dev)
+ return 0;
+
+ put_device(dev);
+
+ return attr->mode;
+}
+
static const struct attribute_group platform_profile_group = {
- .attrs = platform_profile_attrs
+ .attrs = platform_profile_attrs,
+ .is_visible = profile_class_is_visible,
};
-void platform_profile_notify(void)
+/**
+ * platform_profile_notify - Notify class device and legacy sysfs interface
+ * @dev: The class device
+ */
+void platform_profile_notify(struct device *dev)
{
- if (!cur_profile)
- return;
+ scoped_cond_guard(mutex_intr, return, &profile_lock) {
+ _notify_class_profile(dev, NULL);
+ }
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+/**
+ * platform_profile_cycle - Cycles through the profiles available on all registered class devices
+ *
+ * Return: 0 on success, -errno on failure
+ */
int platform_profile_cycle(void)
{
- enum platform_profile_option profile;
- enum platform_profile_option next;
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ enum platform_profile_option next = PLATFORM_PROFILE_LAST;
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+ if (profile == PLATFORM_PROFILE_MAX_POWER ||
+ profile == PLATFORM_PROFILE_CUSTOM ||
+ profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
- err = cur_profile->profile_get(cur_profile, &profile);
- if (err) {
- mutex_unlock(&profile_lock);
- return err;
- }
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (err)
+ return err;
- next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
- profile + 1);
+ /* never cycle into custom or max-power, even if every driver supports them */
+ clear_bit(PLATFORM_PROFILE_MAX_POWER, data.aggregate);
+ clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
- if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
- }
+ next = find_next_bit_wrap(data.aggregate,
+ PLATFORM_PROFILE_LAST,
+ profile + 1);
- err = cur_profile->profile_set(cur_profile, next);
- mutex_unlock(&profile_lock);
+ err = class_for_each_device(&platform_profile_class, NULL, &next,
+ _store_and_notify);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (err)
+ return err;
+ }
- return err;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_cycle);
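
platform_profile_cycle() is aimed at hotkey handlers that want to step through the aggregated profile set without knowing the individual handlers. A minimal sketch of a hypothetical caller (the hotkey plumbing is assumed, not part of this patch):

static void example_profile_hotkey(void)
{
	int err = platform_profile_cycle();

	/* may fail with -ERESTARTSYS if the lock wait was interrupted */
	if (err && err != -ERESTARTSYS)
		pr_debug("profile cycle failed: %d\n", err);
}
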
-int platform_profile_register(struct platform_profile_handler *pprof)
+/**
+ * platform_profile_register - Creates and registers a platform profile class device
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
{
+ struct device *ppdev;
+ int minor;
int err;
- mutex_lock(&profile_lock);
- /* We can only have one active profile */
- if (cur_profile) {
- mutex_unlock(&profile_lock);
- return -EEXIST;
+ /* Sanity check */
+ if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get ||
+ !ops->profile_set || !ops->probe))
+ return ERR_PTR(-EINVAL);
+
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc(
+ sizeof(*pprof), GFP_KERNEL);
+ if (!pprof)
+ return ERR_PTR(-ENOMEM);
+
+ err = ops->probe(drvdata, pprof->choices);
+ if (err) {
+ dev_err(dev, "platform_profile probe failed\n");
+ return ERR_PTR(err);
}
- /* Sanity check the profile handler field are set */
- if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
- !pprof->profile_set || !pprof->profile_get) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
+ if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) {
+ dev_err(dev, "Failed to register platform_profile class device with empty choices\n");
+ return ERR_PTR(-EINVAL);
}
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (ops->hidden_choices) {
+ err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+ if (err) {
+ dev_err(dev, "platform_profile hidden_choices failed\n");
+ return ERR_PTR(err);
+ }
+ }
+
+ guard(mutex)(&profile_lock);
+
+ /* create class interface for individual handler */
+ minor = ida_alloc(&platform_profile_ida, GFP_KERNEL);
+ if (minor < 0)
+ return ERR_PTR(minor);
+
+ pprof->name = name;
+ pprof->ops = ops;
+ pprof->minor = minor;
+ pprof->dev.class = &platform_profile_class;
+ pprof->dev.parent = dev;
+ dev_set_drvdata(&pprof->dev, drvdata);
+ dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor);
+ /* device_register() takes ownership of pprof/ppdev */
+ ppdev = &no_free_ptr(pprof)->dev;
+ err = device_register(ppdev);
if (err) {
- mutex_unlock(&profile_lock);
- return err;
+ put_device(ppdev);
+ goto cleanup_ida;
}
- cur_profile = pprof;
- mutex_unlock(&profile_lock);
- return 0;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ goto cleanup_cur;
+
+ return ppdev;
+
+cleanup_cur:
+ device_unregister(ppdev);
+
+cleanup_ida:
+ ida_free(&platform_profile_ida, minor);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(platform_profile_register);
-int platform_profile_remove(void)
+/**
+ * platform_profile_remove - Unregisters a platform profile class device
+ * @dev: Class device
+ */
+void platform_profile_remove(struct device *dev)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ struct platform_profile_handler *pprof;
- mutex_lock(&profile_lock);
- cur_profile = NULL;
- mutex_unlock(&profile_lock);
- return 0;
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ pprof = to_pprof_handler(dev);
+
+ guard(mutex)(&profile_lock);
+
+ ida_free(&platform_profile_ida, pprof->minor);
+ device_unregister(&pprof->dev);
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
+static void devm_platform_profile_release(struct device *dev, void *res)
+{
+ struct device **ppdev = res;
+
+ platform_profile_remove(*ppdev);
+}
+
+/**
+ * devm_platform_profile_register - Device managed version of platform_profile_register
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
+{
+ struct device *ppdev;
+ struct device **dr;
+
+ dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ppdev = platform_profile_register(dev, name, drvdata, ops);
+ if (IS_ERR(ppdev)) {
+ devres_free(dr);
+ return ppdev;
+ }
+
+ *dr = ppdev;
+ devres_add(dev, dr);
+
+ return ppdev;
+}
+EXPORT_SYMBOL_GPL(devm_platform_profile_register);
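
With this rework, each registered handler shows up as /sys/class/platform-profile/platform-profile-<minor>/ with name, choices and profile attributes, while the legacy /sys/firmware/acpi/platform_profile node aggregates every handler. A minimal sketch of how a driver might sit on top of the new API; the driver name, private struct and chosen profiles below are hypothetical, only the ops wiring and the registration call follow the patch:

struct example_priv {
	struct device *ppdev;
	enum platform_profile_option cur;
};

static int example_profile_get(struct device *dev,
			       enum platform_profile_option *profile)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	*profile = priv->cur;
	return 0;
}

static int example_profile_set(struct device *dev,
			       enum platform_profile_option profile)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	priv->cur = profile;	/* a real driver would poke firmware here */
	return 0;
}

static int example_probe(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static const struct platform_profile_ops example_pprof_ops = {
	.probe		= example_probe,
	.profile_get	= example_profile_get,
	.profile_set	= example_profile_set,
};

/* from the parent driver's probe(), with priv already allocated: */
priv->ppdev = devm_platform_profile_register(dev, "example", priv,
					     &example_pprof_ops);
if (IS_ERR(priv->ppdev))
	return PTR_ERR(priv->ppdev);
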
+
+static int __init platform_profile_init(void)
+{
+ int err;
+
+ if (acpi_disabled)
+ return -EOPNOTSUPP;
+
+ err = class_register(&platform_profile_class);
+ if (err)
+ return err;
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ class_unregister(&platform_profile_class);
+
+ return err;
+}
+
+static void __exit platform_profile_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ class_unregister(&platform_profile_class);
+}
+module_init(platform_profile_init);
+module_exit(platform_profile_exit);
+
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index f20dbda1a831..134e9ca8eaa2 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -31,7 +31,7 @@ struct intel_pmic_opregion {
static struct intel_pmic_opregion *intel_pmic_opregion;
-static int pmic_get_reg_bit(int address, struct pmic_table *table,
+static int pmic_get_reg_bit(int address, const struct pmic_table *table,
int count, int *reg, int *bit)
{
int i;
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index d956b03a6ca0..006f0780ffab 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -21,9 +21,9 @@ struct intel_pmic_opregion_data {
u32 reg_address, u32 value, u32 mask);
int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *lpat_table,
int raw);
- struct pmic_table *power_table;
+ const struct pmic_table *power_table;
int power_table_count;
- struct pmic_table *thermal_table;
+ const struct pmic_table *thermal_table;
int thermal_table_count;
/* For generic exec_mipi_pmic_seq_element handling */
int pmic_i2c_address;
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index e247615189fa..c332afbf82bd 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -24,7 +24,7 @@
#define VSWITCH1_OUTPUT BIT(4)
#define VUSBPHY_CHARGE BIT(1)
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
{
.address = 0x0,
.reg = 0x63,
@@ -177,7 +177,7 @@ static struct pmic_table power_table[] = {
} /* MOFF -> MODEMCTRL Bit 0 */
};
-static struct pmic_table thermal_table[] = {
+static const struct pmic_table thermal_table[] = {
{
.address = 0x00,
.reg = 0x4F39
diff --git a/drivers/acpi/pmic/intel_pmic_bytcrc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index 2b09f8da5400..b4c21a75294a 100644
--- a/drivers/acpi/pmic/intel_pmic_bytcrc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -16,7 +16,7 @@
#define PMIC_A0LOCK_REG 0xc5
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
/* {
.address = 0x00,
.reg = ??,
@@ -134,7 +134,7 @@ static struct pmic_table power_table[] = {
}, /* V105 -> V1P05S, L2 SRAM */
};
-static struct pmic_table thermal_table[] = {
+static const struct pmic_table thermal_table[] = {
{
.address = 0x00,
.reg = 0x75
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index c84ef3d15181..ecb36fbc1e7f 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -8,18 +8,22 @@
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/platform_device.h>
+#include <asm/byteorder.h>
#include "intel_pmic.h"
/* registers stored in 16bit BE (high:low, total 10bit) */
+#define PMIC_REG_MASK GENMASK(9, 0)
+
#define CHTDC_TI_VBAT 0x54
#define CHTDC_TI_DIETEMP 0x56
#define CHTDC_TI_BPTHERM 0x58
#define CHTDC_TI_GPADC 0x5a
-static struct pmic_table chtdc_ti_power_table[] = {
+static const struct pmic_table chtdc_ti_power_table[] = {
{ .address = 0x00, .reg = 0x41 }, /* LDO1 */
{ .address = 0x04, .reg = 0x42 }, /* LDO2 */
{ .address = 0x08, .reg = 0x43 }, /* LDO3 */
@@ -35,7 +39,7 @@ static struct pmic_table chtdc_ti_power_table[] = {
{ .address = 0x30, .reg = 0x4e }, /* LD14 */
};
-static struct pmic_table chtdc_ti_thermal_table[] = {
+static const struct pmic_table chtdc_ti_thermal_table[] = {
{
.address = 0x00,
.reg = CHTDC_TI_GPADC
@@ -73,7 +77,7 @@ static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit,
if (regmap_read(regmap, reg, &data))
return -EIO;
- *value = data & 1;
+ *value = data & BIT(0);
return 0;
}
@@ -85,13 +89,12 @@ static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit,
static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
- u8 buf[2];
+ __be16 buf;
- if (regmap_bulk_read(regmap, reg, buf, 2))
+ if (regmap_bulk_read(regmap, reg, &buf, sizeof(buf)))
return -EIO;
- /* stored in big-endian */
- return ((buf[0] & 0x03) << 8) | buf[1];
+ return be16_to_cpu(buf) & PMIC_REG_MASK;
}
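
The rework above is behavior-preserving: the register pair was already documented as 16-bit big-endian with 10 valid bits, and be16_to_cpu() plus the GENMASK(9, 0) mask computes the same value as the old manual shift. A standalone sketch of the equivalence, using a hypothetical sample value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[2] = { 0x02, 0xa5 };	/* hypothetical raw ADC bytes */

	/* old code: mask the high byte to 2 bits, then combine */
	int old_val = ((buf[0] & 0x03) << 8) | buf[1];

	/* new code: big-endian 16-bit load, then mask to bits 9:0 */
	int new_val = ((buf[0] << 8) | buf[1]) & 0x3ff;	/* GENMASK(9, 0) */

	assert(old_val == new_val && new_val == 0x2a5);
	return 0;
}
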
static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index f2c42f4c79ca..81caede51ca2 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -70,7 +70,7 @@
* "regulator: whiskey_cove: implements Whiskey Cove pmic VRF support"
* https://github.com/intel-aero/meta-intel-aero/blob/master/recipes-kernel/linux/linux-yocto/0019-regulator-whiskey_cove-implements-WhiskeyCove-pmic-V.patch
*/
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
{
.address = 0x0,
.reg = CHT_WC_V1P8A_CTRL,
@@ -236,11 +236,12 @@ static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
u32 reg_address,
u32 value, u32 mask)
{
+ struct device *dev = regmap_get_device(regmap);
u32 address;
if (i2c_client_address > 0xff || reg_address > 0xff) {
- pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
- __func__, i2c_client_address, reg_address);
+ dev_warn(dev, "warning addresses too big client 0x%x reg 0x%x\n",
+ i2c_client_address, reg_address);
return -ERANGE;
}
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 61bbe4c24d87..49bda5e0c8aa 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -26,7 +26,7 @@
#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
{
.address = 0x00,
.reg = 0x13,
@@ -129,7 +129,7 @@ static struct pmic_table power_table[] = {
};
/* TMP0 - TMP5 are the same, all from GPADC */
-static struct pmic_table thermal_table[] = {
+static const struct pmic_table thermal_table[] = {
{
.address = 0x00,
.reg = XPOWER_GPADC_LOW
@@ -255,7 +255,7 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
if (ret)
return ret;
- ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, sizeof(buf));
if (ret == 0)
ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
@@ -274,11 +274,12 @@ static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap,
u16 i2c_address, u32 reg_address,
u32 value, u32 mask)
{
+ struct device *dev = regmap_get_device(regmap);
int ret;
if (i2c_address != 0x34) {
- pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
- __func__, i2c_address, reg_address, value, mask);
+ dev_err(dev, "Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+ i2c_address, reg_address, value, mask);
return -ENXIO;
}
diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
index ebd03e472955..0d1a82eeb4b0 100644
--- a/drivers/acpi/pmic/tps68470_pmic.c
+++ b/drivers/acpi/pmic/tps68470_pmic.c
@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
struct tps68470_pmic_opregion *opregion;
acpi_status status;
- if (!dev || !tps68470_regmap) {
- dev_warn(dev, "dev or regmap is NULL\n");
- return -EINVAL;
- }
+ if (!tps68470_regmap)
+ return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index c2c70139c4f1..361a7721a6a8 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,12 +23,14 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/acpi.h>
@@ -62,6 +64,9 @@ struct acpi_power_resource_entry {
struct acpi_power_resource *resource;
};
+static bool hp_eb_gp12pxp_quirk;
+static bool unused_power_resources_quirk;
+
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
@@ -197,7 +202,7 @@ static int __get_state(acpi_handle handle, u8 *state)
cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
- cur_state ? "on" : "off");
+ str_on_off(cur_state));
*state = cur_state;
return 0;
@@ -240,7 +245,7 @@ static int acpi_power_get_list_state(struct list_head *list, u8 *state)
break;
}
- pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
+ pr_debug("Power resource list is %s\n", str_on_off(cur_state));
*state = cur_state;
return 0;
@@ -950,8 +955,8 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
- strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
+ strscpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_POWER_CLASS);
device->power.state = ACPI_STATE_UNKNOWN;
device->flags.match_driver = true;
@@ -991,6 +996,38 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool resource_is_gp12pxp(acpi_handle handle)
+{
+ const char *path;
+ bool ret;
+
+ path = acpi_handle_path(handle);
+ ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0;
+ kfree(path);
+
+ return ret;
+}
+
+static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource)
+{
+ acpi_handle_notice(resource->device.handle,
+ "HP EB quirk - turning OFF then ON\n");
+
+ __acpi_power_off(resource);
+ __acpi_power_on(resource);
+
+ /*
+ * Use the same delay as DSDT uses in modem _RST method.
+ *
+ * Otherwise we get "Unable to change power state from unknown to D0,
+ * device inaccessible" error for the modem PCI device after thaw.
+ *
+ * This power resource is normally enabled only once, during thaw,
+ * so this wait is not a performance concern.
+ */
+ msleep(200);
+}
+
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1012,8 +1049,14 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- acpi_handle_debug(resource->device.handle, "Turning ON\n");
- __acpi_power_on(resource);
+ if (hp_eb_gp12pxp_quirk &&
+ resource_is_gp12pxp(resource->device.handle)) {
+ acpi_resume_on_eb_gp12pxp(resource);
+ } else {
+ acpi_handle_debug(resource->device.handle,
+ "Turning ON\n");
+ __acpi_power_on(resource);
+ }
}
mutex_unlock(&resource->resource_lock);
@@ -1023,6 +1066,41 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = {
+/*
+ * This laptop (and possibly similar models) has a power resource called
+ * "GP12.PXP_" for its WWAN modem.
+ *
+ * For this power resource to turn ON power for the modem, an internal
+ * flag called "ONEN" needs to be set.
+ * That flag is only set from this power resource's "_OFF" method, while
+ * the actual modem power gets turned off during suspend by the "GP12.PTS"
+ * method called from the global "_PTS" (Prepare To Sleep) method.
+ * On the other hand, this power resource's "_OFF" method merely sets the
+ * aforementioned flag without doing anything else (it contains no code
+ * that actually turns off power).
+ *
+ * Consequently, when we try to turn this power resource back ON at the
+ * end of hibernation because its "_STA" method returns 0 (while the
+ * resource is still considered in use), its "_ON" method does nothing
+ * because the "ONEN" flag is not set.
+ * The modem is then dead until the laptop is rebooted: its power was cut
+ * by "_PTS" and its PCI configuration was lost and could not be restored.
+ *
+ * The easiest way to work around the issue is to call this power
+ * resource's "_OFF" method before calling its "_ON" method, making sure
+ * the "ONEN" flag gets properly set.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
@@ -1045,7 +1123,7 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
- if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ if (unused_power_resources_quirk)
return;
mutex_lock(&power_resource_list_lock);
@@ -1064,3 +1142,10 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+
+void __init acpi_power_resources_init(void)
+{
+ hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk);
+ unused_power_resources_quirk =
+ dmi_check_system(dmi_leave_unused_power_resources_on);
+}
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..de5f8c018333 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -21,6 +21,25 @@
#include <linux/cacheinfo.h>
#include <acpi/processor.h>
+/*
+ * The acpi_pptt_cache_v1 in actbl2.h, which is imported from acpica,
+ * only contains the cache_id field rather than all the fields of the
+ * Cache Type Structure. Use this alternative structure until it is
+ * resolved in acpica.
+ */
+struct acpi_pptt_cache_v1_full {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 flags;
+ u32 next_level_of_cache;
+ u32 size;
+ u32 number_of_sets;
+ u8 associativity;
+ u8 attributes;
+ u16 line_size;
+ u32 cache_id;
+} __packed;
+
static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr,
u32 pptt_ref)
{
@@ -56,6 +75,18 @@ static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_
return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref);
}
+static struct acpi_pptt_cache_v1_full *upgrade_pptt_cache(struct acpi_pptt_cache *cache)
+{
+ if (cache->header.length < sizeof(struct acpi_pptt_cache_v1_full))
+ return NULL;
+
+ /* No use for v1 if the only additional field is invalid */
+ if (!(cache->flags & ACPI_PPTT_CACHE_ID_VALID))
+ return NULL;
+
+ return (struct acpi_pptt_cache_v1_full *)cache;
+}
+
static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *node,
int resource)
@@ -177,14 +208,14 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
}
/**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
- * levels and split cache levels (data/instruction).
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the
+ * total number of levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
- * @levels: Number of levels if success.
* @split_levels: Number of split cache levels (data/instruction) on
* success. Can be NULL.
*
+ * Return: number of levels.
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
@@ -192,14 +223,18 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
* split cache levels (data/instruction) that exist at each level on the way
* up.
*/
-static void acpi_count_levels(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- unsigned int *levels, unsigned int *split_levels)
+static int acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *split_levels)
{
+ int current_level = 0;
+
do {
- acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
+ acpi_find_cache_level(table_hdr, cpu_node, &current_level, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
+
+ return current_level;
}
/**
@@ -229,18 +264,20 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
- while ((unsigned long)entry + proc_sz < table_end) {
+ /* ignore subtable types that are smaller than a processor node */
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
+
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
+
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
-
}
return 1;
}
@@ -270,18 +307,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
- while ((unsigned long)entry + proc_sz < table_end) {
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
+ /* entry->length may not equal proc_sz; revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
+ (unsigned long)entry + entry->length <= table_end &&
+ entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}
@@ -346,7 +386,6 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
* @this_leaf: Kernel cache info structure being updated
* @found_cache: The PPTT node describing this cache instance
* @cpu_node: A unique reference to describe this cache instance
- * @revision: The revision of the PPTT table
*
* The ACPI spec implies that the fields in the cache structures are used to
* extend and correct the information probed from the hardware. Lets only
@@ -356,10 +395,9 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
*/
static void update_cache_properties(struct cacheinfo *this_leaf,
struct acpi_pptt_cache *found_cache,
- struct acpi_pptt_processor *cpu_node,
- u8 revision)
+ struct acpi_pptt_processor *cpu_node)
{
- struct acpi_pptt_cache_v1* found_cache_v1;
+ struct acpi_pptt_cache_v1_full *found_cache_v1;
this_leaf->fw_token = cpu_node;
if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
@@ -409,9 +447,8 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
this_leaf->type = CACHE_TYPE_UNIFIED;
- if (revision >= 3 && (found_cache->flags & ACPI_PPTT_CACHE_ID_VALID)) {
- found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
- found_cache, sizeof(struct acpi_pptt_cache));
+ found_cache_v1 = upgrade_pptt_cache(found_cache);
+ if (found_cache_v1) {
this_leaf->id = found_cache_v1->cache_id;
this_leaf->attributes |= CACHE_ID;
}
@@ -436,8 +473,7 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
pr_debug("found = %p %p\n", found_cache, cpu_node);
if (found_cache)
update_cache_properties(this_leaf, found_cache,
- ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)),
- table->revision);
+ ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)));
index++;
}
@@ -640,7 +676,7 @@ int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
if (!cpu_node)
return -ENOENT;
- acpi_count_levels(table, cpu_node, levels, split_levels);
+ *levels = acpi_count_levels(table, cpu_node, split_levels);
pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
*levels, split_levels ? *split_levels : -1);
@@ -812,3 +848,218 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_ACPI_IDENTICAL);
}
+
+/**
+ * acpi_pptt_get_child_cpus() - Find all the CPUs below a PPTT
+ * processor hierarchy node
+ *
+ * @table_hdr: A reference to the PPTT table
+ * @parent_node: A pointer to the processor hierarchy node in the
+ * table_hdr
+ * @cpus: A cpumask to fill with the CPUs below @parent_node
+ *
+ * Walks up the PPTT from every possible CPU to check whether the provided
+ * @parent_node is an ancestor of that CPU.
+ */
+static void acpi_pptt_get_child_cpus(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *parent_node,
+ cpumask_t *cpus)
+{
+ struct acpi_pptt_processor *cpu_node;
+ u32 acpi_id;
+ int cpu;
+
+ cpumask_clear(cpus);
+
+ for_each_possible_cpu(cpu) {
+ acpi_id = get_acpi_id_for_cpu(cpu);
+ cpu_node = acpi_find_processor_node(table_hdr, acpi_id);
+
+ while (cpu_node) {
+ if (cpu_node == parent_node) {
+ cpumask_set_cpu(cpu, cpus);
+ break;
+ }
+ cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
+ }
+ }
+}
+
+/**
+ * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a
+ * processor container
+ * @acpi_cpu_id: The UID of the processor container
+ * @cpus: The resulting CPU mask
+ *
+ * Find the specified Processor Container, and fill @cpus with all the cpus
+ * below it.
+ *
+ * Not every 'Processor Hierarchy' entry in the PPTT is a CPU or a
+ * Processor Container; some exist purely to describe a Private
+ * resource. CPUs have to be leaves, so a Processor Container
+ * is a non-leaf that has the 'ACPI Processor ID valid' flag set.
+ */
+void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus)
+{
+ struct acpi_table_header *table_hdr;
+ struct acpi_subtable_header *entry;
+ unsigned long table_end;
+ u32 proc_sz;
+
+ cpumask_clear(cpus);
+
+ table_hdr = acpi_get_pptt();
+ if (!table_hdr)
+ return;
+
+ table_end = (unsigned long)table_hdr + table_hdr->length;
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ sizeof(struct acpi_table_pptt));
+ proc_sz = sizeof(struct acpi_pptt_processor);
+ while ((unsigned long)entry + proc_sz <= table_end) {
+ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR) {
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = (struct acpi_pptt_processor *)entry;
+ if (cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID &&
+ !acpi_pptt_leaf_node(table_hdr, cpu_node) &&
+ cpu_node->acpi_processor_id == acpi_cpu_id) {
+ acpi_pptt_get_child_cpus(table_hdr, cpu_node, cpus);
+ break;
+ }
+ }
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+ entry->length);
+ }
+}
+
+/**
+ * find_acpi_cache_level_from_id() - Get the level of the specified cache
+ * @cache_id: The id field of the cache
+ *
+ * Determine the level relative to any CPU for the cache identified by
+ * cache_id. This allows the property to be found even if the CPUs are offline.
+ *
+ * The returned level can be used to group caches that are peers.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * If one CPU's L2 is shared with another CPU as L3, this function will return
+ * an unpredictable value.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the revision isn't supported or
+ * the cache cannot be found.
+ * Otherwise returns a value which represents the level of the specified cache.
+ */
+int find_acpi_cache_level_from_id(u32 cache_id)
+{
+ int cpu;
+ struct acpi_table_header *table;
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
+
+ if (table->revision < 3)
+ return -ENOENT;
+
+ for_each_possible_cpu(cpu) {
+ bool empty;
+ int level = 1;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_cache *cache;
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ continue;
+
+ do {
+ int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED};
+
+ empty = true;
+ for (int i = 0; i < ARRAY_SIZE(cache_type); i++) {
+ struct acpi_pptt_cache_v1_full *cache_v1;
+
+ cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
+ level, &cpu_node);
+ if (!cache)
+ continue;
+
+ empty = false;
+
+ cache_v1 = upgrade_pptt_cache(cache);
+ if (cache_v1 && cache_v1->cache_id == cache_id)
+ return level;
+ }
+ level++;
+ } while (!empty);
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the
+ * specified cache
+ * @cache_id: The id field of the cache
+ * @cpus: Where to build the cpumask
+ *
+ * Determine which CPUs are below this cache in the PPTT. This allows the property
+ * to be found even if the CPUs are offline.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns 0 and sets the cpus in the provided cpumask.
+ */
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus)
+{
+ int cpu;
+ struct acpi_table_header *table;
+
+ cpumask_clear(cpus);
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
+
+ if (table->revision < 3)
+ return -ENOENT;
+
+ for_each_possible_cpu(cpu) {
+ bool empty;
+ int level = 1;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_cache *cache;
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ continue;
+
+ do {
+ int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED};
+
+ empty = true;
+ for (int i = 0; i < ARRAY_SIZE(cache_type); i++) {
+ struct acpi_pptt_cache_v1_full *cache_v1;
+
+ cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
+ level, &cpu_node);
+
+ if (!cache)
+ continue;
+
+ empty = false;
+
+ cache_v1 = upgrade_pptt_cache(cache);
+ if (cache_v1 && cache_v1->cache_id == cache_id)
+ cpumask_set_cpu(cpu, cpus);
+ }
+ level++;
+ } while (!empty);
+ }
+
+ return 0;
+}
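
Together, the two new helpers let a caller resolve a PPTT rev-3 cache ID to both its level and its set of CPUs without any of those CPUs being online. A hedged sketch of a hypothetical in-kernel user (the cache ID source is assumed):

/* Hypothetical caller: map a firmware-provided cache ID to its level
 * and affine CPUs. Assumes a rev 3+ PPTT; both helpers return -ENOENT
 * otherwise.
 */
static int example_resolve_cache(u32 cache_id)
{
	cpumask_var_t cpus;
	int level, err;

	level = find_acpi_cache_level_from_id(cache_id);
	if (level < 0)
		return level;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	err = acpi_pptt_get_cpumask_from_cache_id(cache_id, cpus);
	if (!err)
		pr_debug("cache 0x%x: L%d, CPUs %*pbl\n", cache_id, level,
			 cpumask_pr_args(cpus));

	free_cpumask_var(cpus);
	return err;
}
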
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index c78453c74ef5..7b8b5d2015ec 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -52,7 +52,7 @@ struct prm_context_buffer {
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
- guid_t guid;
+ efi_guid_t guid;
efi_status_t (__efiapi *handler_addr)(u64, void *);
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
@@ -72,15 +72,17 @@ struct prm_module_info {
struct prm_handler_info handlers[] __counted_by(handler_count);
};
-static u64 efi_pa_va_lookup(u64 pa)
+static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
{
efi_memory_desc_t *md;
u64 pa_offset = pa & ~PAGE_MASK;
u64 page = pa & PAGE_MASK;
for_each_efi_memory_desc(md) {
- if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
+ if ((md->attribute & EFI_MEMORY_RUNTIME) &&
+ (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
return pa_offset + md->virt_addr + page - md->phys_addr;
+ }
}
return 0;
@@ -148,9 +150,52 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
- th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
- th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
- th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
+
+ /*
+ * If handler_address is NULL, log it and skip the VA lookups for
+ * this handler.
+ */
+ if (unlikely(!handler_info->handler_address)) {
+ pr_info("Skipping handler with NULL address for GUID: %pUL",
+ (guid_t *)handler_info->handler_guid);
+ continue;
+ }
+
+ th->handler_addr =
+ (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+ /*
+ * Warn and skip this handler if the handler VA could not be resolved,
+ * which is never expected to happen.
+ */
+ if (unlikely(!th->handler_addr)) {
+ pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->handler_address);
+ continue;
+ }
+
+ th->static_data_buffer_addr =
+ efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+ /*
+ * According to the PRM specification, static_data_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address))
+ pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->static_data_buffer_address);
+
+ th->acpi_param_buffer_addr =
+ efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+
+ /*
+ * According to the PRM specification, acpi_param_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address))
+ pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->acpi_param_buffer_address);
+
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
@@ -199,6 +244,12 @@ static struct prm_handler_info *find_prm_handler(const guid_t *guid)
return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER);
}
+bool acpi_prm_handler_available(const guid_t *guid)
+{
+ return find_prm_handler(guid) && find_prm_module(guid);
+}
+EXPORT_SYMBOL_GPL(acpi_prm_handler_available);
+
/* In-coming PRM commands */
#define PRM_CMD_RUN_SERVICE 0
@@ -214,6 +265,30 @@ static struct prm_handler_info *find_prm_handler(const guid_t *guid)
#define UPDATE_LOCK_ALREADY_HELD 4
#define UPDATE_UNLOCK_WITHOUT_LOCK 5
+int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer)
+{
+ struct prm_handler_info *handler = find_prm_handler(&handler_guid);
+ struct prm_module_info *module = find_prm_module(&handler_guid);
+ struct prm_context_buffer context;
+ efi_status_t status;
+
+ if (!module || !handler)
+ return -ENODEV;
+
+ memset(&context, 0, sizeof(context));
+ ACPI_COPY_NAMESEG(context.signature, "PRMC");
+ context.identifier = handler->guid;
+ context.static_data_buffer = handler->static_data_buffer_addr;
+ context.mmio_ranges = module->mmio_info;
+
+ status = efi_call_acpi_prm_handler(handler->handler_addr,
+ (u64)param_buffer,
+ &context);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(acpi_call_prm_handler);
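
The new export pairs naturally with acpi_prm_handler_available() added above: a driver can probe for a platform service before invoking it. A sketch of a hypothetical caller; the GUID and the parameter buffer layout are illustrative only, defined by the platform's PRM module rather than this patch:

static const guid_t example_prm_guid =
	GUID_INIT(0x01234567, 0x89ab, 0xcdef,
		  0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);

static int example_run_prm_service(void *param_buf)
{
	if (!acpi_prm_handler_available(&example_prm_guid))
		return -ENODEV;

	/* param_buf layout is defined by the platform's PRM module */
	return acpi_call_prm_handler(example_prm_guid, param_buf);
}
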
+
/*
* This is the PlatformRtMechanism opregion space handler.
* @function: indicates the read/write. In fact as the PlatformRtMechanism
@@ -253,6 +328,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
+ if (!handler->handler_addr) {
+ buffer->prm_status = PRM_HANDLER_ERROR;
+ return AE_OK;
+ }
+
ACPI_COPY_NAMESEG(context.signature, "PRMC");
context.revision = 0x0;
context.reserved = 0x0;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 4322f2da6d10..c08ead07252b 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/export.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <linux/acpi.h>
@@ -30,17 +30,16 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
if (!dev->wakeup.flags.valid)
continue;
- seq_printf(seq, "%s\t S%d\t",
+ seq_printf(seq, "%s\t S%llu\t",
dev->pnp.bus_id,
- (u32) dev->wakeup.sleep_state);
+ dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- device_may_wakeup(&dev->dev) ?
- "enabled" : "disabled");
+ str_enabled_disabled(device_may_wakeup(&dev->dev)));
} else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
@@ -55,9 +54,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- (device_may_wakeup(&dev->dev) ||
- device_may_wakeup(ldev)) ?
- "enabled" : "disabled",
+ str_enabled_disabled(device_may_wakeup(ldev) ||
+ device_may_wakeup(&dev->dev)),
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
put_device(ldev);
@@ -141,6 +139,5 @@ static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
- proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
+ proc_create("wakeup", 0644, acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b203cfe28550..a4498357bd16 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -54,7 +54,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
- if (device_declaration && (apic->uid == acpi_id)) {
+ if (apic->uid == acpi_id && (device_declaration || acpi_id < 255)) {
*apic_id = apic->local_apic_id;
return 0;
}
@@ -90,7 +90,8 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
struct acpi_madt_generic_interrupt *gicc =
container_of(entry, struct acpi_madt_generic_interrupt, header);
- if (!acpi_gicc_is_usable(gicc))
+ if (!(gicc->flags &
+ (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
return -ENODEV;
/* device_declaration means Device object in DSDT, in the
@@ -215,6 +216,21 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
return rv;
}
+int __init acpi_get_madt_revision(void)
+{
+ struct acpi_table_header *madt = NULL;
+ int revision;
+
+ if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &madt)))
+ return -EINVAL;
+
+ revision = madt->revision;
+
+ acpi_put_table(madt);
+
+ return revision;
+}
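
acpi_get_madt_revision() gives early init code a way to gate behavior on the MADT revision without re-parsing the table. A short sketch of a hypothetical __init user:

static int __init example_madt_rev_check(void)
{
	int rev = acpi_get_madt_revision();

	if (rev < 0)
		return rev;	/* MADT missing or unreadable */

	/* e.g. only trust flag semantics introduced by newer revisions */
	pr_debug("MADT revision %d\n", rev);
	return 0;
}
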
+
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 67db60eda370..65e779be64ff 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -33,7 +33,6 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
-static int acpi_processor_start(struct device *dev);
static int acpi_processor_stop(struct device *dev);
static const struct acpi_device_id processor_device_ids[] = {
@@ -47,7 +46,6 @@ static struct device_driver acpi_processor_driver = {
.name = "processor",
.bus = &cpu_subsys,
.acpi_match_table = processor_device_ids,
- .probe = acpi_processor_start,
.remove = acpi_processor_stop,
};
@@ -115,12 +113,9 @@ static int acpi_soft_cpu_online(unsigned int cpu)
* CPU got physically hotplugged and onlined for the first time:
* Initialize missing things.
*/
- if (pr->flags.need_hotplug_init) {
+ if (!pr->flags.previously_online) {
int ret;
- pr_info("Will online and init hotplugged CPU: %d\n",
- pr->id);
- pr->flags.need_hotplug_init = 0;
ret = __acpi_processor_start(device);
WARN(ret, "Failed to start CPU: %d\n", pr->id);
} else {
@@ -167,9 +162,6 @@ static int __acpi_processor_start(struct acpi_device *device)
if (!pr)
return -ENODEV;
- if (pr->flags.need_hotplug_init)
- return 0;
-
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
@@ -185,32 +177,21 @@ static int __acpi_processor_start(struct acpi_device *device)
status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_processor_notify, device);
- if (ACPI_SUCCESS(status))
- return 0;
+ if (!ACPI_SUCCESS(status)) {
+ result = -ENODEV;
+ goto err_thermal_exit;
+ }
+ pr->flags.previously_online = 1;
- result = -ENODEV;
- acpi_processor_thermal_exit(pr, device);
+ return 0;
+err_thermal_exit:
+ acpi_processor_thermal_exit(pr, device);
err_power_exit:
acpi_processor_power_exit(pr);
return result;
}
-static int acpi_processor_start(struct device *dev)
-{
- struct acpi_device *device = ACPI_COMPANION(dev);
- int ret;
-
- if (!device)
- return -ENODEV;
-
- /* Protect against concurrent CPU hotplug operations */
- cpu_hotplug_disable();
- ret = __acpi_processor_start(device);
- cpu_hotplug_enable();
- return ret;
-}
-
static int acpi_processor_stop(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
@@ -256,6 +237,9 @@ static struct notifier_block acpi_processor_notifier_block = {
.notifier_call = acpi_processor_notifier,
};
+void __weak acpi_processor_init_invariance_cppc(void)
+{ }
+
/*
* We keep the driver loaded even when ACPI is not running.
* This is needed for the powernow-k8 driver, that works even without
@@ -279,9 +263,9 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
- result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "acpi/cpu-drv:online",
- acpi_soft_cpu_online, NULL);
+ result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "acpi/cpu-drv:online",
+ acpi_soft_cpu_online, NULL);
if (result < 0)
goto err;
hp_online = result;
@@ -289,6 +273,15 @@ static int __init acpi_processor_driver_init(void)
NULL, acpi_soft_cpu_dead);
acpi_processor_throttling_init();
+
+ /*
+ * Frequency invariance calculations on AMD platforms can't be run until
+	 * after acpi_cppc_processor_probe() has been called for all online CPUs.
+ */
+ acpi_processor_init_invariance_cppc();
+
+ acpi_idle_rescan_dead_smt_siblings();
+
return 0;
err:
driver_unregister(&acpi_processor_driver);
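
Note: the pivotal change in this hunk is the switch from cpuhp_setup_state_nocalls() to cpuhp_setup_state(): the plain variant invokes the startup callback once for every CPU that is already online, so acpi_soft_cpu_online() now performs the per-CPU start both at boot and on hotplug, making the driver's .probe hook and the need_hotplug_init flag redundant. A condensed sketch of the general pattern (all names are placeholders):

#include <linux/cpuhotplug.h>

static int my_cpu_online(unsigned int cpu)
{
	/* Runs for CPUs coming online -- and, with cpuhp_setup_state(),
	 * also once per CPU that is already online at setup time. */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	return 0;
}

static int hp_online;	/* dynamic state id for later teardown */

static int __init my_driver_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my/driver:online",
				    my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;

	hp_online = ret;
	return 0;
}
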
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index bd6a7857ce05..89f2f08b2554 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -16,7 +16,6 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
-#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
@@ -25,6 +24,8 @@
#include <acpi/processor.h>
#include <linux/context_tracking.h>
+#include "internal.h"
+
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -56,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ arch_cpu_rescan_dead_smt_siblings();
+}
+
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
@@ -269,6 +276,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
+ if (!pr->power.states[ACPI_STATE_C2].address &&
+ !pr->power.states[ACPI_STATE_C3].address)
+ return -ENODEV;
+
return 0;
}
@@ -386,25 +397,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}
-static int acpi_cst_latency_cmp(const void *a, const void *b)
+static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
{
- const struct acpi_processor_cx *x = a, *y = b;
+ int i, j, k;
- if (!(x->valid && y->valid))
- return 0;
- if (x->latency > y->latency)
- return 1;
- if (x->latency < y->latency)
- return -1;
- return 0;
-}
-static void acpi_cst_latency_swap(void *a, void *b, int n)
-{
- struct acpi_processor_cx *x = a, *y = b;
+ for (i = 1; i < length; i++) {
+ if (!states[i].valid)
+ continue;
- if (!(x->valid && y->valid))
- return;
- swap(x->latency, y->latency);
+ for (j = i - 1, k = i; j >= 0; j--) {
+ if (!states[j].valid)
+ continue;
+
+ if (states[j].latency > states[k].latency)
+ swap(states[j].latency, states[k].latency);
+
+ k = j;
+ }
+ }
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
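
Note: the replacement sort deserves a second look. The old sort() callback reportedly returned 0 whenever either entry was invalid, which breaks the transitivity that sort() assumes, and the swap callback only exchanged latencies anyway. The new acpi_cst_latency_sort() keeps the array layout fixed and merely bubbles the latency values of valid entries into non-decreasing order, skipping invalid slots. A standalone, runnable sketch of the same algorithm:

#include <stdio.h>
#include <stdbool.h>

struct cx { bool valid; unsigned int latency; };

static void swap_u32(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

/* Same shape as acpi_cst_latency_sort(): only latency values of valid
 * entries move; invalid slots are skipped and never reordered. */
static void latency_sort(struct cx *states, int length)
{
	int i, j, k;

	for (i = 1; i < length; i++) {
		if (!states[i].valid)
			continue;

		for (j = i - 1, k = i; j >= 0; j--) {
			if (!states[j].valid)
				continue;

			if (states[j].latency > states[k].latency)
				swap_u32(&states[j].latency, &states[k].latency);

			k = j;	/* k trails j across valid entries only */
		}
	}
}

int main(void)
{
	struct cx s[] = { { true, 30 }, { false, 0 }, { true, 10 }, { true, 20 } };
	int i, n = sizeof(s) / sizeof(s[0]);

	latency_sort(s, n);
	for (i = 0; i < n; i++)
		printf("valid=%d latency=%u\n", (int)s[i].valid, s[i].latency);
	return 0;	/* latencies 30, 0, 10, 20 become 10, 0, 20, 30 */
}
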
@@ -449,10 +459,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
if (buggy_latency) {
pr_notice("FW issue: working around C-state latencies out of order\n");
- sort(&pr->power.states[1], max_cstate,
- sizeof(struct acpi_processor_cx),
- acpi_cst_latency_cmp,
- acpi_cst_latency_swap);
+ acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
}
lapic_timer_propagate_broadcast(pr);
@@ -462,10 +469,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
- unsigned int i;
int result;
-
/* NOTE: the idle thread may not be running while calling
* this function */
@@ -482,17 +487,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
-
- /*
- * if one state of type C2 or C3 is available, mark this
- * CPU as being "idle manageable"
- */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- if (pr->power.states[i].valid) {
- pr->power.count = i;
- pr->flags.power = 1;
- }
- }
+ pr->flags.power = 1;
return 0;
}
@@ -583,7 +578,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
* @dev: the target CPU
* @index: the index of suggested state
*/
-static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -595,12 +590,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
+ } else if (cx->entry_method == ACPI_CSTATE_FFH) {
+ acpi_processor_ffh_play_dead(cx);
} else
- return -ENODEV;
+ return;
}
-
- /* Never reached */
- return 0;
}
static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
@@ -738,18 +732,16 @@ static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
return 0;
}
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
- struct cpuidle_device *dev)
+static void acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
{
int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
- struct cpuidle_state *state;
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
- state = &acpi_idle_driver.states[count];
cx = &pr->power.states[i];
if (!cx->valid)
@@ -757,27 +749,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
per_cpu(acpi_cstate[count], dev->cpu) = cx;
- if (lapic_timer_needs_broadcast(pr, cx))
- state->flags |= CPUIDLE_FLAG_TIMER_STOP;
-
- if (cx->type == ACPI_STATE_C3) {
- state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
- if (pr->flags.bm_check)
- state->flags |= CPUIDLE_FLAG_RCU_IDLE;
- }
-
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
-
- if (!count)
- return -EINVAL;
-
- return 0;
}
-static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+static void acpi_processor_setup_cstates(struct acpi_processor *pr)
{
int i, count;
struct acpi_processor_cx *cx;
@@ -808,12 +786,12 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state->enter = acpi_idle_enter;
state->flags = 0;
- if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
- cx->type == ACPI_STATE_C3) {
- state->enter_dead = acpi_idle_play_dead;
- if (cx->type != ACPI_STATE_C3)
- drv->safe_state_index = count;
- }
+
+ state->enter_dead = acpi_idle_play_dead;
+
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2)
+ drv->safe_state_index = count;
+
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
@@ -824,17 +802,21 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
state->enter_s2idle = acpi_idle_enter_s2idle;
+ if (lapic_timer_needs_broadcast(pr, cx))
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+ if (cx->type == ACPI_STATE_C3) {
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+ if (pr->flags.bm_check)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+ }
+
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
drv->state_count = count;
-
- if (!count)
- return -EINVAL;
-
- return 0;
}
static inline void acpi_processor_cstate_first_run_checks(void)
@@ -1004,11 +986,6 @@ end:
return ret;
}
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
@@ -1051,9 +1028,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
-static int flatten_lpi_states(struct acpi_processor *pr,
- struct acpi_lpi_states_array *curr_level,
- struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+ unsigned int flat_state_cnt,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1093,7 +1071,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
}
kfree(curr_level->entries);
- return 0;
+ return flat_state_cnt;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1108,6 +1086,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ unsigned int state_count;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1120,14 +1099,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
- flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
- flatten_lpi_states(pr, prev, NULL);
+ state_count = flatten_lpi_states(pr, 0, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
@@ -1149,18 +1127,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
break;
/* flatten all the LPI states in this level of hierarchy */
- flatten_lpi_states(pr, curr, prev);
+ state_count = flatten_lpi_states(pr, state_count, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
- pr->power.count = flat_state_cnt;
/* reset the index after flattening */
- for (i = 0; i < pr->power.count; i++)
+ for (i = 0; i < state_count; i++)
pr->power.lpi_states[i].index = i;
+ pr->power.count = state_count;
+
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
@@ -1252,7 +1231,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
if (pr->flags.has_lpi)
return acpi_processor_setup_lpi_states(pr);
- return acpi_processor_setup_cstates(pr);
+ acpi_processor_setup_cstates(pr);
+ return 0;
}
/**
@@ -1272,7 +1252,8 @@ static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
if (pr->flags.has_lpi)
return acpi_processor_ffh_lpi_probe(pr->id);
- return acpi_processor_setup_cpuidle_cx(pr, dev);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
+ return 0;
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
@@ -1411,6 +1392,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
if (retval) {
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
return retval;
}
acpi_processor_registered++;
@@ -1437,3 +1421,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
pr->flags.power_setup_done = 0;
return 0;
}
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
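
Note: one structural change in this file is easy to miss: the file-scope flat_state_cnt is gone, and flatten_lpi_states() now takes the running count as a parameter and returns the updated value. Threading the accumulator this way makes the function re-entrant and keeps the count visible at the call site in acpi_processor_get_lpi_info(). In miniature (runnable, with the merging logic elided):

#include <stdio.h>

/* Before: `static int flat_state_cnt;` mutated as a side effect.
 * After: the count flows through arguments and return values, so no
 * global needs resetting between invocations. Merging logic elided. */
static unsigned int flatten_level(unsigned int count, unsigned int kept)
{
	return count + kept;
}

int main(void)
{
	unsigned int count = flatten_level(0, 3);	/* leaf level   */

	count = flatten_level(count, 2);		/* parent level */
	printf("composite LPI states: %u\n", count);	/* prints 5 */
	return 0;
}
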
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 4265814c74f8..8972446b7162 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,12 +20,11 @@
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
+#include <asm/msr.h>
#endif
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
-static DEFINE_MUTEX(performance_mutex);
-
/*
* _PPC support is implemented as a CPUfreq policy notifier:
* This means each time a CPUfreq driver registered also with
@@ -174,6 +173,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
+ if (ignore_ppc == 1)
+ return;
+
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
@@ -194,6 +196,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+
+ if (!pr->performance)
+ continue;
+
+ ret = acpi_processor_get_platform_limit(pr);
+ if (ret)
+ pr_err("Failed to update freq constraint for CPU%d (%d)\n",
+ cpu, ret);
}
}
@@ -209,6 +219,10 @@ void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
}
}
+#ifdef CONFIG_X86
+
+static DEFINE_MUTEX(performance_mutex);
+
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
@@ -267,7 +281,6 @@ end:
return result;
}
-#ifdef CONFIG_X86
/*
* Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
* in their ACPI data. Calculate the real values and fix up the _PSS data.
@@ -298,9 +311,6 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
px->core_frequency = (100 * (fid + 8)) >> did;
}
}
-#else
-static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
-#endif
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
@@ -440,13 +450,11 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
* the BIOS is older than the CPU and does not know its frequencies
*/
update_bios:
-#ifdef CONFIG_X86
if (acpi_has_method(pr->handle, "_PPC")) {
if(boot_cpu_has(X86_FEATURE_EST))
pr_warn(FW_BUG "BIOS needs update for CPU "
"frequency support\n");
}
-#endif
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
@@ -788,3 +796,4 @@ unlock:
mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
+#endif
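
Note: for context on the amd_fixup_frequency() hunk above: AMD parts with 50 MHz frequency multiples report only 100 MHz-rounded values in _PSS, so the driver recomputes the true frequency from the hardware FID/DID pair as 100 * (fid + 8) >> did, in MHz. The arithmetic, checked standalone:

#include <stdio.h>

/* Mirrors the fixed-up calculation in amd_fixup_frequency():
 * core_frequency = (100 * (fid + 8)) >> did, in MHz. */
static unsigned int amd_core_freq_mhz(unsigned int fid, unsigned int did)
{
	return (100u * (fid + 8)) >> did;
}

int main(void)
{
	/* fid=14, did=1 -> 2200 >> 1 = 1100 MHz: a 50 MHz multiple that
	 * a 100 MHz-granular _PSS entry would have misreported. */
	printf("%u MHz\n", amd_core_freq_mhz(14, 1));
	printf("%u MHz\n", amd_core_freq_mhz(16, 0));	/* 2400 MHz */
	return 0;
}
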
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1219adb11ab9..c7b1dc5687ec 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -62,19 +62,14 @@ static int phys_package_first_cpu(int cpu)
return 0;
}
-static int cpu_has_cpufreq(unsigned int cpu)
+static bool cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy *policy;
-
if (!acpi_processor_cpufreq_init)
return 0;
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- cpufreq_cpu_put(policy);
- return 1;
- }
- return 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+
+ return policy != NULL;
}
static int cpufreq_get_max_state(unsigned int cpu)
@@ -93,12 +88,31 @@ static int cpufreq_get_cur_state(unsigned int cpu)
return reduction_step(cpu);
}
+static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
+{
+ unsigned long max_freq;
+ int ret;
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return false;
+
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
+
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
+ return true;
+}
+
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- struct cpufreq_policy *policy;
struct acpi_processor *pr;
- unsigned long max_freq;
- int i, ret;
+ int i;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -120,20 +134,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
- policy = cpufreq_cpu_get(i);
- if (!policy)
+ if (!cpufreq_update_thermal_limit(i, pr))
return -EINVAL;
-
- max_freq = (policy->cpuinfo.max_freq *
- (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
-
- cpufreq_cpu_put(policy);
-
- ret = freq_qos_update_request(&pr->thermal_req, max_freq);
- if (ret < 0) {
- pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
- pr->id, ret);
- }
}
return 0;
}
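
Note: the __free(put_cpufreq_policy) annotations above use the kernel's scope-based cleanup support from <linux/cleanup.h>: the cleanup routine bound to the guard name runs automatically when the variable leaves scope, which is why neither cpu_has_cpufreq() nor cpufreq_update_thermal_limit() needs an explicit cpufreq_cpu_put() on any return path. A condensed sketch of the idiom (the DEFINE_FREE for cpufreq ships with <linux/cpufreq.h>; it is reproduced in a comment only for illustration):

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

/* Roughly how the guard is declared (already provided by cpufreq):
 * DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
 *	       if (_T) cpufreq_cpu_put(_T))
 */

static bool cpu_policy_exists(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
						cpufreq_cpu_get(cpu);

	/* No explicit put: the cleanup handler runs on every return
	 * path once 'policy' goes out of scope. */
	return policy != NULL;
}
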
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 00d045e5f524..f9c2bc1d4a3a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -18,9 +18,12 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
+#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
@@ -232,7 +235,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
- pr_warn("Exceed the limit of T-state \n");
+ pr_warn("Exceed the limit of T-state\n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 80a52a4e66dd..18e90067d567 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
+ acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
@@ -98,59 +99,45 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data);
-
- if (handle) {
- acpi_handle scope;
- acpi_status status;
+ /*
+ * The scope for the completion of relative pathname segments and
+ * subnode object lookup is the one of the namespace node (device)
+ * containing the object that has returned the package. That is, it's
+ * the scope of that object's parent device.
+ */
+ if (handle)
+ acpi_get_parent(handle, &scope);
- /*
- * The scope for the subnode object lookup is the one of the
- * namespace node (device) containing the object that has
- * returned the package. That is, it's the scope of that
- * object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
- &dn->fwnode))
- result = true;
- } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
- &dn->fwnode)) {
+ /*
+ * Extract properties from the _DSD-equivalent package pointed to by
+ * desc and use scope (if not NULL) for the completion of relative
+ * pathname segments.
+ *
+ * The extracted properties will be held in the new data node dn.
+ */
+ result = acpi_extract_properties(scope, desc, &dn->data);
+ /*
+ * Look for subnodes in the _DSD-equivalent package pointed to by desc
+ * and create child nodes of dn if there are any.
+ */
+ if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
- }
-
- if (result) {
- dn->handle = handle;
- dn->data.pointer = desc;
- list_add_tail(&dn->sibling, list);
- return true;
- }
-
- kfree(dn);
- acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
- return false;
-}
-
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
- const union acpi_object *link,
- struct list_head *list,
- struct fwnode_handle *parent)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- acpi_status status;
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
+ if (!result) {
+ kfree(dn);
+ acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
+ }
- if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
- parent))
- return true;
+ /*
+ * This will be NULL if the desc package is embedded in an outer
+ * _DSD-equivalent package and its scope cannot be determined.
+ */
+ dn->handle = handle;
+ dn->data.pointer = desc;
+ list_add_tail(&dn->sibling, list);
- ACPI_FREE(buf.pointer);
- return false;
+ return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
@@ -158,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
struct list_head *list,
struct fwnode_handle *parent)
{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
+ /*
+ * If the scope is unknown, the _DSD-equivalent package being parsed
+ * was embedded in an outer _DSD-equivalent package as a result of
+ * direct evaluation of an object pointed to by a reference. In that
+ * case, using a pathname as the target object pointer is invalid.
+ */
if (!scope)
return false;
@@ -169,7 +163,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
if (ACPI_FAILURE(status))
return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent);
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+ parent))
+ return true;
+
+ ACPI_FREE(buf.pointer);
+ return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
@@ -180,9 +184,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
bool ret = false;
int i;
+ /*
+ * Every element in the links package is expected to represent a link
+ * to a non-device node in a tree containing device-specific data.
+ */
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
- acpi_handle handle;
bool result;
link = &links->package.elements[i];
@@ -190,26 +197,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
if (link->package.count != 2)
continue;
- /* The first one must be a string. */
+ /* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
- /* The second one may be a string, a reference or a package. */
+ /* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
+ /*
+ * The string is expected to be a full pathname or a
+ * pathname segment relative to the given scope. That
+ * pathname is expected to point to an object returning
+ * a package that contains _DSD-equivalent information.
+ */
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
- case ACPI_TYPE_LOCAL_REFERENCE:
- handle = link->package.elements[1].reference.handle;
- result = acpi_nondev_subnode_data_ok(handle, link, list,
- parent);
- break;
case ACPI_TYPE_PACKAGE:
+ /*
+ * This happens when a reference is used in AML to
+ * point to the target. Since the target is expected
+ * to be a named object, a reference to it will cause it
+			 * to be evaluated in place and its return package will
+ * be embedded in the links package at the location of
+ * the reference.
+ *
+ * The target package is expected to contain _DSD-
+ * equivalent information, but the scope in which it
+ * is located in the original AML is unknown. Thus
+ * it cannot contain pathname segments represented as
+ * strings because there is no way to build full
+ * pathnames out of them.
+ */
+ acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
+ link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * It is not expected to see any local references in
+ * the links package because referencing a named object
+ * should cause it to be evaluated in place.
+ */
+ acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
+ link->package.elements[0].string.pointer);
+ fallthrough;
default:
result = false;
break;
@@ -369,6 +403,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
+ if (!dn->handle)
+ continue;
+
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
@@ -383,6 +420,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
acpi_status status;
bool ret;
+ if (!dn->handle)
+ continue;
+
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
@@ -804,13 +844,35 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode,
+ const char *nargs_prop)
+{
+ const struct acpi_device_data *data;
+ const union acpi_object *obj;
+ int ret;
+
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return 0;
+
+ ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+ return 0;
+
+ return obj->integer.value;
+}
+
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
+ const char *nargs_prop,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
+ if (nargs_prop)
+ num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop);
+
/*
* Assume the following integer elements are all args. Stop counting on
* the first reference (possibly represented as a string) or end of the
@@ -882,45 +944,10 @@ static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *f
return &dn->fwnode;
}
-/**
- * __acpi_node_get_property_reference - returns handle to the referenced object
- * @fwnode: Firmware node to get the property from
- * @propname: Name of the property
- * @index: Index of the reference to return
- * @num_args: Maximum number of arguments after each reference
- * @args: Location to store the returned reference with optional arguments
- * (may be NULL)
- *
- * Find property with @name, verifify that it is a package containing at least
- * one object reference and if so, store the ACPI device object pointer to the
- * target object in @args->adev. If the reference includes arguments, store
- * them in the @args->args[] array.
- *
- * If there's more than one reference in the property value package, @index is
- * used to select the one to return.
- *
- * It is possible to leave holes in the property value set like in the
- * example below:
- *
- * Package () {
- * "cs-gpios",
- * Package () {
- * ^GPIO, 19, 0, 0,
- * ^GPIO, 20, 0, 0,
- * 0,
- * ^GPIO, 21, 0, 0,
- * }
- * }
- *
- * Calling this function with index %2 or index %3 return %-ENOENT. If the
- * property does not contain any more values %-ENOENT is returned. The NULL
- * entry must be single integer and preferably contain value %0.
- *
- * Return: %0 on success, negative error code on failure.
- */
-int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
- const char *propname, size_t index, size_t num_args,
- struct fwnode_reference_args *args)
+static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -996,10 +1023,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
- &element, end, num_args);
+ nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1014,10 +1041,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
- ref_fwnode, &element, end,
- num_args);
+ ref_fwnode, nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1039,6 +1065,50 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
}
+
+/**
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
+ * @propname: Name of the property
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
+ *
+ * Find property with @propname, verify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+ * target object in @args->adev. If the reference includes arguments, store
+ * them in the @args->args[] array.
+ *
+ * If there's more than one reference in the property value package, @index is
+ * used to select the one to return.
+ *
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain the value %0.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *propname, size_t index,
+ size_t num_args,
+ struct fwnode_reference_args *args)
+{
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
+}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
@@ -1187,8 +1257,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1280,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min(nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
@@ -1259,13 +1329,14 @@ static int stop_on_next(struct acpi_device *adev, void *data)
return 0;
}
-/**
+/*
* acpi_get_next_subnode - Return the next child node handle for a fwnode
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
-struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
+static struct fwnode_handle *
+acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
@@ -1318,6 +1389,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
return NULL;
}
+/*
+ * acpi_get_next_present_subnode - Return the next present child node handle
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a null handle.
+ *
+ * Like acpi_get_next_subnode(), but the device nodes returned by
+ * acpi_get_next_present_subnode() are guaranteed to be present.
+ *
+ * Returns: The fwnode handle of the next present sub-node.
+ */
+static struct fwnode_handle *
+acpi_get_next_present_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ do {
+ child = acpi_get_next_subnode(fwnode, child);
+ } while (is_acpi_device_node(child) &&
+ !acpi_device_is_present(to_acpi_device_node(child)));
+
+ return child;
+}
+
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
@@ -1380,7 +1473,7 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
- port = fwnode_get_next_child_node(fwnode, port);
+ port = acpi_get_next_subnode(fwnode, port);
/*
* The names of the port nodes begin with "port@"
* followed by the number of the port node and they also
@@ -1398,14 +1491,17 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!port)
return NULL;
- endpoint = fwnode_get_next_child_node(port, prev);
- while (!endpoint) {
- port = fwnode_get_next_child_node(fwnode, port);
- if (!port)
+ do {
+ endpoint = acpi_get_next_subnode(port, prev);
+ if (endpoint)
break;
- if (is_acpi_graph_node(port, "port"))
- endpoint = fwnode_get_next_child_node(port, NULL);
- }
+
+ prev = NULL;
+
+ do {
+ port = acpi_get_next_subnode(fwnode, port);
+ } while (port && !is_acpi_graph_node(port, "port"));
+ } while (port);
/*
* The names of the endpoint nodes begin with "endpoint@" followed by
@@ -1492,7 +1588,7 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
- return false;
+ return true;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
@@ -1558,16 +1654,6 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static int
-acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
- const char *prop, const char *nargs_prop,
- unsigned int args_count, unsigned int index,
- struct fwnode_reference_args *args)
-{
- return __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, args);
-}
-
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
@@ -1632,6 +1718,7 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
+ fwnode_handle_put(port_fwnode);
return 0;
}
@@ -1656,12 +1743,13 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
+ .property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
- .get_next_child_node = acpi_get_next_subnode, \
+ .get_next_child_node = acpi_get_next_present_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
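
Note: the net effect of the reference-args refactor above is that acpi_fwnode_get_reference_args() can now honor an nargs_prop: when the generic fwnode API passes a property name such as "#gpio-cells", the argument count is read from the referenced node itself rather than taken from the caller's num_args cap. A consumer-side sketch (all property names below are hypothetical):

#include <linux/property.h>

/* Hypothetical consumer: resolve the first "example-ref" reference and
 * let the provider's "#example-cells" property dictate how many args
 * follow it. Property names here are made up for illustration. */
static int example_get_ref(struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "example-ref",
						 "#example-cells",
						 0 /* nargs fallback */,
						 0 /* index */, &args);
	if (ret)
		return ret;

	/* args.fwnode is the provider; args.args[0..args.nargs-1]
	 * hold the cell values that followed the reference. */
	fwnode_handle_put(args.fwnode);
	return 0;
}
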
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b5bf8b81a050..d16906f46484 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -250,6 +251,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);
+
+ if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+ res->flags |= IORESOURCE_PREFETCH;
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,
@@ -265,9 +269,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
- if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
- res->flags |= IORESOURCE_PREFETCH;
-
return !(res->flags & IORESOURCE_DISABLED);
}
@@ -441,87 +442,80 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
- /* Asus ExpertBook B1402CBA */
+ /* Asus Vivobook X1404VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
},
},
{
- /* Asus ExpertBook B1402CVA */
+ /* Asus Vivobook X1504VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+ DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
},
},
{
- /* Asus ExpertBook B1502CBA */
+ /* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"),
},
},
{
- /* Asus ExpertBook B1502CGA */
+ /* Asus ExpertBook B1402C* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402C"),
},
},
- {
- /* Asus ExpertBook B1502CVA */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1502CVA"),
- },
- },
{
- /* Asus ExpertBook B2402CBA */
+ /* Asus ExpertBook B1502C* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502C"),
},
},
{
- /* Asus ExpertBook B2402FBA */
+ /* Asus ExpertBook B2402 (B2402CBA / B2402FBA / B2402CVA / B2402FVA) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B2402"),
},
},
{
- /* Asus ExpertBook B2502 */
+ /* Asus ExpertBook B2502 (B2502CBA / B2502FBA / B2502CVA / B2502FVA) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502"),
},
},
{
- /* Asus ExpertBook B2502FBA */
+ /* Asus Vivobook Go E1404GA* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2502FBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "E1404GA"),
},
},
{
- /* Asus Vivobook E1504GA */
+ /* Asus Vivobook E1504GA* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "E1504GA"),
},
},
{
- /* Asus Vivobook E1504GAB */
+ /* Asus Vivobook Pro N6506M* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "E1504GAB"),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506M"),
},
},
{
- /* Asus Vivobook Pro N6506MV */
+ /* Asus Vivobook Pro N6506CU* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506CU"),
},
},
{
@@ -531,6 +525,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
},
},
+ {
+ /* LG Electronics 16T90SP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "16T90SP"),
+ },
+ },
{ }
};
@@ -541,6 +542,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
+ /* MECHREVO Jiaolong17KS Series GM7XG0M */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
+ },
+ },
+ {
/* XMG APEX 17 (M23) */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"),
@@ -559,6 +566,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
+ },
+ },
+ {
/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
@@ -566,6 +579,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
@@ -655,6 +674,24 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
+ {
+		/* MACHENIKE L16P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+ DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+ },
+ },
+ {
+ /*
+ * TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
+ * board-name is changed, so check OEM strings instead. Note
+ * OEM string matches are always exact matches.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
+ },
+ },
{ }
};
@@ -680,11 +717,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
- if (dmi_check_system(entry->system) &&
- entry->irq == gsi &&
+ if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
- entry->shareable == shareable)
+ entry->shareable == shareable &&
+ dmi_check_system(entry->system))
return entry->override;
}
@@ -744,7 +781,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
- p ? "low" : "high",
+ str_low_high(p),
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig
new file mode 100644
index 000000000000..046296a18d00
--- /dev/null
+++ b/drivers/acpi/riscv/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI Configuration for RISC-V
+#
+
+config ACPI_RIMT
+ bool
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index 86b0925f612d..1284a076fa88 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += rhct.o
+obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_RIMT) += rimt.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 4cdff387deff..42c1a9052470 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_CPPC) > 0) {
- pr_info("SBI CPPC extension detected\n");
cppc_ext_present = true;
} else {
- pr_info("SBI CPPC extension NOT detected!!\n");
cppc_ext_present = false;
}
@@ -121,7 +119,7 @@ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
*val = data.ret.value;
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
@@ -150,7 +148,7 @@ int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
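
Note: cpc_read_ffh() and cpc_write_ffh() rely on the standard cross-call idiom visible above: a CSR is only accessible from the CPU that owns it, so the access is shipped to that CPU as an IPI and the caller blocks until it completes. The generic shape of the pattern (the payload is a placeholder):

#include <linux/smp.h>

struct remote_read { unsigned long val; };

static void do_read_on_cpu(void *info)
{
	struct remote_read *rr = info;

	/* Runs on the target CPU; a real user would read a CSR here. */
	rr->val = 42;	/* placeholder value */
}

static unsigned long read_from_cpu(int cpu)
{
	struct remote_read rr;

	/* wait=1: block until the handler has executed on 'cpu' */
	smp_call_function_single(cpu, do_read_on_cpu, &rr, 1);
	return rr.val;
}
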
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
new file mode 100644
index 000000000000..7c00f7995e86
--- /dev/null
+++ b/drivers/acpi/riscv/init.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023-2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#include <linux/acpi.h>
+#include "init.h"
+
+void __init acpi_arch_init(void)
+{
+ riscv_acpi_init_gsi_mapping();
+ if (IS_ENABLED(CONFIG_ACPI_RIMT))
+ riscv_acpi_rimt_init();
+}
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
new file mode 100644
index 000000000000..1680aa2aaf23
--- /dev/null
+++ b/drivers/acpi/riscv/init.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/init.h>
+
+void __init riscv_acpi_init_gsi_mapping(void);
+void __init riscv_acpi_rimt_init(void);
diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
new file mode 100644
index 000000000000..d9a2154d6c6a
--- /dev/null
+++ b/drivers/acpi/riscv/irq.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023-2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sort.h>
+#include <linux/irq.h>
+
+#include "init.h"
+
+#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0)
+
+struct riscv_ext_intc_list {
+ acpi_handle handle;
+ u32 gsi_base;
+ u32 nr_irqs;
+ u32 nr_idcs;
+ u32 id;
+ u32 type;
+ u32 flag;
+ struct list_head list;
+};
+
+struct acpi_irq_dep_ctx {
+ int rc;
+ unsigned int index;
+ acpi_handle handle;
+};
+
+LIST_HEAD(ext_intc_list);
+
+static int irqchip_cmp_func(const void *in0, const void *in1)
+{
+ struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0;
+ struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1;
+
+ return (elem0->type > elem1->type) - (elem0->type < elem1->type);
+}
+
+/*
+ * On RISC-V, RINTC structures in MADT should be probed before any other
+ * interrupt controller structures and IMSIC before APLIC. The interrupt
+ * controller subtypes in the MADT are defined by the ACPI spec for
+ * RISC-V in increasing order: RINTC(24) -> IMSIC(25) -> APLIC(26) ->
+ * PLIC(27). Hence, simply sorting the entries by subtype in ascending
+ * order establishes the required probe order.
+ */
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr)
+{
+ struct acpi_probe_entry *ape = ap_head;
+
+ if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id))
+ return;
+ sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL);
+}
+
+static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct list_head *i, *tmp;
+
+ list_for_each_safe(i, tmp, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (gsi_base == ext_intc_element->gsi_base) {
+ ext_intc_element->handle = handle;
+ return AE_OK;
+ }
+ }
+
+ return AE_NOT_FOUND;
+}
+
+int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+
+ list_for_each_entry(ext_intc_element, &ext_intc_list, list) {
+ if (gsi_base == ext_intc_element->gsi_base &&
+ (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
+ u32 *id, u32 *nr_irqs, u32 *nr_idcs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct list_head *i;
+
+ list_for_each(i, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) {
+ *gsi_base = ext_intc_element->gsi_base;
+ *id = ext_intc_element->id;
+ *nr_irqs = ext_intc_element->nr_irqs;
+ if (nr_idcs)
+ *nr_idcs = ext_intc_element->nr_idcs;
+
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct acpi_device *adev;
+ struct list_head *i;
+
+ list_for_each(i, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (gsi >= ext_intc_element->gsi_base &&
+ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) {
+ adev = acpi_fetch_acpi_dev(ext_intc_element->handle);
+ if (!adev)
+ return NULL;
+
+ return acpi_fwnode_handle(adev);
+ }
+ }
+
+ return NULL;
+}
+
+static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs,
+ u32 id, u32 type)
+{
+ struct riscv_ext_intc_list *ext_intc_element, *node, *prev;
+
+ ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL);
+ if (!ext_intc_element)
+ return -ENOMEM;
+
+ ext_intc_element->gsi_base = gsi_base;
+
+	/* If nr_irqs is zero, mark the entry as pending and cover the maximum possible range */
+ if (nr_irqs) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ } else {
+ ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING;
+ ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base;
+ }
+
+ ext_intc_element->nr_idcs = nr_idcs;
+ ext_intc_element->id = id;
+ list_for_each_entry(node, &ext_intc_list, list) {
+ if (node->gsi_base < ext_intc_element->gsi_base)
+ break;
+ }
+
+ /* Adjust the previous node's GSI range if that has pending registration */
+ prev = list_prev_entry(node, list);
+ if (!list_entry_is_head(prev, &ext_intc_list, list)) {
+ if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING)
+ prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base;
+ }
+
+ list_add_tail(&ext_intc_element->list, &node->list);
+ return 0;
+}
+
+static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI);
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
+static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
+static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header;
+
+ return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs,
+ aplic->id, ACPI_RISCV_IRQCHIP_APLIC);
+}
+
+static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header;
+
+ return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0,
+ plic->id, ACPI_RISCV_IRQCHIP_PLIC);
+}
+
+void __init riscv_acpi_init_gsi_mapping(void)
+{
+ /* There can be either PLIC or APLIC */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) {
+ acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL);
+ return;
+ }
+
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0)
+ acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL);
+
+ /* Unlike PLIC/APLIC, SYSMSI doesn't have MADT */
+ acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL);
+}
+
+static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct list_head *i;
+
+ list_for_each(i, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (gsi >= ext_intc_element->gsi_base &&
+ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs))
+ return ext_intc_element->handle;
+ }
+
+ return NULL;
+}
+
+static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context)
+{
+ struct acpi_irq_dep_ctx *ctx = context;
+ struct acpi_resource_irq *irq;
+ struct acpi_resource_extended_irq *eirq;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = &ares->data.irq;
+ if (ctx->index >= irq->interrupt_count) {
+ ctx->index -= irq->interrupt_count;
+ return AE_OK;
+ }
+ ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]);
+ return AE_CTRL_TERMINATE;
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ eirq = &ares->data.extended_irq;
+ if (eirq->producer_consumer == ACPI_PRODUCER)
+ return AE_OK;
+
+ if (ctx->index >= eirq->interrupt_count) {
+ ctx->index -= eirq->interrupt_count;
+ return AE_OK;
+ }
+
+ /* Support GSIs only */
+ if (eirq->resource_source.string_length)
+ return AE_OK;
+
+ ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]);
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle)
+{
+ struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL};
+
+ if (!gsi_handle)
+ return 0;
+
+ acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx);
+ *gsi_handle = ctx.handle;
+ if (*gsi_handle)
+ return 1;
+
+ return 0;
+}
+
+static u32 riscv_acpi_add_prt_dep(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_pci_routing_table *entry;
+ struct acpi_handle_list dep_devices;
+ acpi_handle gsi_handle;
+ acpi_handle link_handle;
+ acpi_status status;
+ u32 count = 0;
+
+ status = acpi_get_irq_routing_table(handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to get IRQ routing table\n");
+ kfree(buffer.pointer);
+ return 0;
+ }
+
+ entry = buffer.pointer;
+ while (entry && (entry->length > 0)) {
+ if (entry->source[0]) {
+ acpi_get_handle(handle, entry->source, &link_handle);
+ dep_devices.count = 1;
+ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL);
+ if (!dep_devices.handles) {
+ acpi_handle_err(handle, "failed to allocate memory\n");
+ continue;
+ }
+
+ dep_devices.handles[0] = link_handle;
+ count += acpi_scan_add_dep(handle, &dep_devices);
+ } else {
+ gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index);
+ dep_devices.count = 1;
+ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL);
+ if (!dep_devices.handles) {
+ acpi_handle_err(handle, "failed to allocate memory\n");
+ continue;
+ }
+
+ dep_devices.handles[0] = gsi_handle;
+ count += acpi_scan_add_dep(handle, &dep_devices);
+ }
+
+ entry = (struct acpi_pci_routing_table *)
+ ((unsigned long)entry + entry->length);
+ }
+
+ kfree(buffer.pointer);
+ return count;
+}
+
+static u32 riscv_acpi_add_irq_dep(acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_handle gsi_handle;
+ u32 count = 0;
+ int i;
+
+ for (i = 0;
+ riscv_acpi_irq_get_dep(handle, i, &gsi_handle);
+ i++) {
+ dep_devices.count = 1;
+ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL);
+ if (!dep_devices.handles) {
+ acpi_handle_err(handle, "failed to allocate memory\n");
+ continue;
+ }
+
+ dep_devices.handles[0] = gsi_handle;
+ count += acpi_scan_add_dep(handle, &dep_devices);
+ }
+
+ return count;
+}
+
+u32 arch_acpi_add_auto_dep(acpi_handle handle)
+{
+ if (acpi_has_method(handle, "_PRT"))
+ return riscv_acpi_add_prt_dep(handle);
+
+ return riscv_acpi_add_irq_dep(handle);
+}
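
Note: several functions in the new irq.c reduce to the same interval lookup: find the registered controller whose half-open range [gsi_base, gsi_base + nr_irqs) contains a GSI, as riscv_acpi_get_gsi_domain_id() and riscv_acpi_get_gsi_handle() both do over ext_intc_list. A freestanding sketch of that lookup over a plain array:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct ext_intc { uint32_t gsi_base; uint32_t nr_irqs; const char *name; };

/* First entry whose [gsi_base, gsi_base + nr_irqs) range contains the
 * GSI wins -- the same walk riscv_acpi_get_gsi_handle() performs. */
static const struct ext_intc *find_intc(const struct ext_intc *v, size_t n,
					uint32_t gsi)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (gsi >= v[i].gsi_base &&
		    gsi < v[i].gsi_base + v[i].nr_irqs)
			return &v[i];
	return NULL;
}

int main(void)
{
	/* Hypothetical layout: one PLIC-like and one APLIC-like range. */
	const struct ext_intc intcs[] = { { 0, 96, "PLIC" }, { 96, 32, "APLIC" } };
	const struct ext_intc *hit = find_intc(intcs, 2, 100);

	printf("GSI 100 -> %s\n", hit ? hit->name : "none");	/* APLIC */
	return 0;
}
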
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
new file mode 100644
index 000000000000..7f423405e5ef
--- /dev/null
+++ b/drivers/acpi/riscv/rimt.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: RIMT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "init.h"
+
+struct rimt_fwnode {
+ struct list_head list;
+ struct acpi_rimt_node *rimt_node;
+ struct fwnode_handle *fwnode;
+};
+
+static LIST_HEAD(rimt_fwnode_list);
+static DEFINE_SPINLOCK(rimt_fwnode_lock);
+
+#define RIMT_TYPE_MASK(type) (1 << (type))
+#define RIMT_IOMMU_TYPE BIT(0)
+
+/* Root pointer to the mapped RIMT table */
+static struct acpi_table_header *rimt_table;
+
+/**
+ * rimt_set_fwnode() - Create rimt_fwnode and use it to register
+ * iommu data in the rimt_fwnode_list
+ *
+ * @rimt_node: RIMT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the RIMT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
+ struct fwnode_handle *fwnode)
+{
+ struct rimt_fwnode *np;
+
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->rimt_node = rimt_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_add_tail(&np->list, &rimt_fwnode_list);
+ spin_unlock(&rimt_fwnode_lock);
+
+ return 0;
+}
+
+static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
+ void *context)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct device *dev = context;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
+ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev;
+ u16 bdf;
+
+ pdev = to_pci_dev(dev);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
+ bdf == iommu_node->pcie_bdf) {
+ status = AE_OK;
+ } else {
+ status = AE_NOT_FOUND;
+ }
+ } else {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start == iommu_node->base_address)
+ status = AE_OK;
+ else
+ status = AE_NOT_FOUND;
+ }
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ struct acpi_rimt_pcie_rc *pci_rc;
+ struct pci_bus *bus;
+
+ bus = to_pci_bus(dev);
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+
+ /*
+ * It is assumed that PCI segment numbers map one-to-one
+ * to root complexes, i.e. each segment number represents
+ * exactly one root complex.
+ */
+ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
+ AE_OK : AE_NOT_FOUND;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_rimt_platform_device *ncomp;
+ struct device *plat_dev = dev;
+ struct acpi_device *adev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * RIMT for a device matching a platform device if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(plat_dev);
+ if (adev)
+ break;
+
+ plat_dev = plat_dev->parent;
+ } while (plat_dev);
+
+ if (!adev)
+ return status;
+
+ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(plat_dev, "Can't get device full path name\n");
+ return status;
+ }
+
+ ncomp = (struct acpi_rimt_platform_device *)node->node_data;
+ status = !strcmp(ncomp->device_name, buf.pointer) ?
+ AE_OK : AE_NOT_FOUND;
+ acpi_os_free(buf.pointer);
+ }
+
+ return status;
+}
+
+static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
+ void *context)
+{
+ struct acpi_rimt_node *rimt_node, *rimt_end;
+ struct acpi_table_rimt *rimt;
+ int i;
+
+ if (!rimt_table)
+ return NULL;
+
+ /* Get the first RIMT node */
+ rimt = (struct acpi_table_rimt *)rimt_table;
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
+ rimt->node_offset);
+ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rimt_table->length);
+
+ for (i = 0; i < rimt->num_nodes; i++) {
+ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
+ "RIMT node pointer overflows, bad table!\n"))
+ return NULL;
+
+ if (rimt_node->type == type &&
+ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
+ return rimt_node;
+
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
+ rimt_node->length);
+ }
+
+ return NULL;
+}
+
+/*
+ * On RISC-V, an IOMMU can be a PCI device or a platform device. When
+ * it is a platform device, a namespace device describes it alongside
+ * the RIMT entry. To create the link between the RIMT information and
+ * the platform device, the IOMMU driver must register itself with the
+ * RIMT module; the same applies to PCI-based IOMMUs.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
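+
+/*
+ * Minimal sketch of how an IOMMU driver is expected to use this hook
+ * (hypothetical probe code, not part of any driver):
+ *
+ *   static int example_iommu_probe(struct platform_device *pdev)
+ *   {
+ *           int ret = rimt_iommu_register(&pdev->dev);
+ *
+ *           if (ret)
+ *                   return ret;
+ *           ... set up the hardware, then iommu_device_register() ...
+ *           return 0;
+ *   }
+ *
+ * Once the fwnode is recorded, rimt_iommu_xlate() stops returning
+ * -EPROBE_DEFER for devices that sit behind this IOMMU.
+ */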
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
+static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_pcie_rc *pci_rc;
+
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
+}
+
+static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
+{
+ struct fwnode_handle *rimt_fwnode;
+
+ if (!node)
+ return -ENODEV;
+
+ rimt_fwnode = rimt_get_fwnode(node);
+
+ /*
+ * The IOMMU driver may not have been probed yet,
+ * so defer the IOMMU configuration.
+ */
+ if (!rimt_fwnode)
+ return -EPROBE_DEFER;
+
+ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
+}
+
+struct rimt_pci_alias_info {
+ struct device *dev;
+ struct acpi_rimt_node *node;
+ const struct iommu_ops *ops;
+};
+
+static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
+{
+ if (rid_in < map->source_id_base ||
+ (rid_in > map->source_id_base + map->num_ids))
+ return -ENXIO;
+
+ *rid_out = map->dest_id_base + (rid_in - map->source_id_base);
+ return 0;
+}
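+
+/*
+ * Worked example: with source_id_base == 0x0, num_ids == 0xffff and
+ * dest_id_base == 0x10000, an input RID of 0x23 passes the range check
+ * and maps to 0x10000 + (0x23 - 0x0) = 0x10023.
+ */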
+
+static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
+ u32 *id_out, int index)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ struct acpi_rimt_id_mapping *map;
+ struct acpi_rimt_node *parent;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ return NULL;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
+ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ *id_out = map->dest_id_base;
+ return parent;
+ }
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ u32 id = id_in;
+
+ /* Parse the ID mapping tree to find specified node type */
+ while (node) {
+ struct acpi_rimt_id_mapping *map;
+ int i, rc = 0;
+ u32 map_id = id;
+
+ if (RIMT_TYPE_MASK(node->type) & type_mask) {
+ if (id_out)
+ *id_out = id;
+ return node;
+ }
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ goto fail_map;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping)
+ goto fail_map;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ goto fail_map;
+ }
+
+ /* Do the ID translation */
+ for (i = 0; i < num_id_mapping; i++, map++) {
+ rc = rimt_id_map(map, node->type, map_id, &id);
+ if (!rc)
+ break;
+ }
+
+ if (i == num_id_mapping)
+ goto fail_map;
+
+ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rc ? 0 : map->dest_offset);
+ }
+
+fail_map:
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
+
+ return NULL;
+}
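+
+/*
+ * Example walk (hypothetical table layout): starting from a PCIe root
+ * complex node whose single mapping covers IDs 0x0-0xffff with
+ * dest_id_base 0 and dest_offset pointing at an IOMMU node, an input
+ * RID is translated through that mapping, node advances to the IOMMU
+ * node via dest_offset, and the next loop iteration returns it because
+ * its type matches the RIMT_IOMMU_TYPE mask.
+ */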
+
+static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
+ u8 type_mask, int index)
+{
+ struct acpi_rimt_node *parent;
+ u32 id;
+
+ parent = rimt_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
+ parent = rimt_node_map_id(parent, id, id_out, type_mask);
+ else if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
+static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rimt_pci_alias_info *info = data;
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
+ return rimt_iommu_xlate(info->dev, parent, deviceid);
+}
+
+static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 deviceid = 0;
+
+ do {
+ parent = rimt_node_map_platform_id(node, &deviceid,
+ RIMT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = rimt_iommu_xlate(dev, parent, deviceid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int rimt_plat_iommu_map_id(struct device *dev,
+ struct acpi_rimt_node *node,
+ const u32 *in_id)
+{
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
+ if (parent)
+ return rimt_iommu_xlate(dev, parent, deviceid);
+
+ return -ENODEV;
+}
+
+/**
+ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ * @id_in: optional input id const value pointer
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ struct acpi_rimt_node *node;
+ int err = -ENODEV;
+
+ if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ struct rimt_pci_alias_info info = { .dev = dev };
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
+ if (!node)
+ return -ENODEV;
+
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ rimt_pci_iommu_init, &info);
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && rimt_pcie_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ } else {
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
+ if (!node)
+ return -ENODEV;
+
+ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
+ rimt_plat_iommu_map(dev, node);
+ }
+
+ return err;
+}
+
+#endif
+
+void __init riscv_acpi_rimt_init(void)
+{
+ acpi_status status;
+
+ /*
+ * rimt_table will be used at runtime after the RIMT init,
+ * so we don't need to call acpi_put_table() to release
+ * the RIMT table mapping.
+ */
+ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
+ }
+}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 94e3c000df2e..d3edc3bcbf01 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -77,7 +77,6 @@ struct acpi_battery {
u16 spec;
u8 id;
u8 present:1;
- u8 have_sysfs_alarm:1;
};
#define to_acpi_battery(x) power_supply_get_drvdata(x)
@@ -241,11 +240,11 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property sbs_ac_props[] = {
+static const enum power_supply_property sbs_ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-static enum power_supply_property sbs_charge_battery_props[] = {
+static const enum power_supply_property sbs_charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -263,7 +262,7 @@ static enum power_supply_property sbs_charge_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
-static enum power_supply_property sbs_energy_battery_props[] = {
+static const enum power_supply_property sbs_energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static const struct device_attribute alarm_attr = {
+static struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
+static struct attribute *acpi_battery_attrs[] = {
+ &alarm_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(acpi_battery);
+
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -482,7 +487,7 @@ static int acpi_battery_read(struct acpi_battery *battery)
if (result)
return result;
- battery->present = state & (1 << battery->id);
+ battery->present = !!(state & (1 << battery->id));
if (!battery->present)
return 0;
@@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery)
static int acpi_battery_add(struct acpi_sbs *sbs, int id)
{
struct acpi_battery *battery = &sbs->battery[id];
- struct power_supply_config psy_cfg = { .drv_data = battery, };
+ struct power_supply_config psy_cfg = {
+ .drv_data = battery,
+ .attr_grp = acpi_battery_groups,
+ };
int result;
battery->id = id;
@@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
goto end;
}
- result = device_create_file(&battery->bat->dev, &alarm_attr);
- if (result)
- goto end;
- battery->have_sysfs_alarm = 1;
end:
pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
@@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
struct acpi_battery *battery = &sbs->battery[id];
- if (battery->bat) {
- if (battery->have_sysfs_alarm)
- device_remove_file(&battery->bat->dev, &alarm_attr);
+ if (battery->bat)
power_supply_unregister(battery->bat);
- }
}
static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -610,7 +611,7 @@ static void acpi_sbs_callback(void *context)
if (sbs->charger_exists) {
acpi_ac_get_present(sbs);
if (sbs->charger_present != saved_charger_state)
- kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(sbs->charger);
}
if (sbs->manager_present) {
@@ -622,7 +623,7 @@ static void acpi_sbs_callback(void *context)
acpi_battery_read(bat);
if (saved_battery_state == bat->present)
continue;
- kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(bat->bat);
}
}
}
@@ -643,8 +644,8 @@ static int acpi_sbs_add(struct acpi_device *device)
sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device;
- strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
+ strscpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_SBS_CLASS);
device->driver_data = sbs;
result = acpi_charger_add(sbs);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 16f2daaa2c45..1a2bf520be23 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include "sbshc.h"
+#include "internal.h"
#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
@@ -236,12 +237,6 @@ static int smbus_alarm(void *context)
return 0;
}
-typedef int (*acpi_ec_query_func) (void *data);
-
-extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
- acpi_handle handle, acpi_ec_query_func func,
- void *data);
-
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
@@ -257,8 +252,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return -EIO;
}
- strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
if (!hc)
@@ -278,8 +273,6 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return 0;
}
-extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
-
static void acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 503773707e01..416d87f9bd10 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
@@ -243,13 +244,17 @@ static int acpi_scan_try_to_offline(struct acpi_device *device)
return 0;
}
-static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
+#define ACPI_SCAN_CHECK_FLAG_STATUS BIT(0)
+#define ACPI_SCAN_CHECK_FLAG_EJECT BIT(1)
+
+static int acpi_scan_check_and_detach(struct acpi_device *adev, void *p)
{
struct acpi_scan_handler *handler = adev->handler;
+ uintptr_t flags = (uintptr_t)p;
- acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, check);
+ acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, p);
- if (check) {
+ if (flags & ACPI_SCAN_CHECK_FLAG_STATUS) {
acpi_bus_get_status(adev);
/*
* Skip devices that are still there and take the enabled
@@ -269,8 +274,6 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
if (handler) {
if (handler->detach)
handler->detach(adev);
-
- adev->handler = NULL;
} else {
device_release_driver(&adev->dev);
}
@@ -280,6 +283,28 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
*/
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
+
+ /* For eject this is deferred to acpi_bus_post_eject() */
+ if (!(flags & ACPI_SCAN_CHECK_FLAG_EJECT)) {
+ adev->handler = NULL;
+ acpi_device_clear_enumerated(adev);
+ }
+ return 0;
+}
+
+static int acpi_bus_post_eject(struct acpi_device *adev, void *not_used)
+{
+ struct acpi_scan_handler *handler = adev->handler;
+
+ acpi_dev_for_each_child_reverse(adev, acpi_bus_post_eject, NULL);
+
+ if (handler) {
+ if (handler->post_eject)
+ handler->post_eject(adev);
+
+ adev->handler = NULL;
+ }
+
acpi_device_clear_enumerated(adev);
return 0;
@@ -287,7 +312,9 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
static void acpi_scan_check_subtree(struct acpi_device *adev)
{
- acpi_scan_check_and_detach(adev, (void *)true);
+ uintptr_t flags = ACPI_SCAN_CHECK_FLAG_STATUS;
+
+ acpi_scan_check_and_detach(adev, (void *)flags);
}
static int acpi_scan_hot_remove(struct acpi_device *device)
@@ -295,6 +322,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_handle handle = device->handle;
unsigned long long sta;
acpi_status status;
+ uintptr_t flags = ACPI_SCAN_CHECK_FLAG_EJECT;
if (device->handler && device->handler->hotplug.demand_offline) {
if (!acpi_scan_is_offline(device, true))
@@ -307,7 +335,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_handle_debug(handle, "Ejecting\n");
- acpi_bus_trim(device);
+ acpi_scan_check_and_detach(device, (void *)flags);
acpi_evaluate_lck(handle, 0);
/*
@@ -330,6 +358,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
} else if (sta & ACPI_STA_DEVICE_ENABLED) {
acpi_handle_warn(handle,
"Eject incomplete - status 0x%llx\n", sta);
+ } else {
+ acpi_bus_post_eject(device, NULL);
}
return 0;
@@ -694,10 +724,8 @@ int acpi_tie_acpi_dev(struct acpi_device *adev)
static void acpi_store_pld_crc(struct acpi_device *adev)
{
struct acpi_pld_info *pld;
- acpi_status status;
- status = acpi_get_physical_device_location(adev->handle, &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(adev->handle, &pld))
return;
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
@@ -766,10 +794,7 @@ int acpi_device_add(struct acpi_device *device)
goto err;
}
- result = acpi_device_setup_files(device);
- if (result)
- pr_err("Error creating sysfs interface for device %s\n",
- dev_name(&device->dev));
+ acpi_device_setup_files(device);
return 0;
@@ -821,6 +846,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
+ "INTC10DE", /* Intel CVS LNL */
+ "INTC10E0", /* Intel CVS ARL */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
@@ -832,6 +859,11 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */
"INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */
"INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
+ "RSCV0001", /* RISC-V PLIC */
+ "RSCV0002", /* RISC-V APLIC */
+ "RSCV0005", /* RISC-V SBI MPXY MBOX */
+ "RSCV0006", /* RISC-V RPMI SYSMSI */
+ "PNP0C0F", /* PCI Link Device */
NULL
};
@@ -1150,19 +1182,19 @@ static void acpi_device_get_busid(struct acpi_device *device)
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
if (!acpi_dev_parent(device)) {
- strcpy(device->pnp.bus_id, "ACPI");
+ strscpy(device->pnp.bus_id, "ACPI");
return;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
- strcpy(device->pnp.bus_id, "PWRF");
+ strscpy(device->pnp.bus_id, "PWRF");
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
- strcpy(device->pnp.bus_id, "SLPF");
+ strscpy(device->pnp.bus_id, "SLPF");
break;
case ACPI_BUS_TYPE_ECDT_EC:
- strcpy(device->pnp.bus_id, "ECDT");
+ strscpy(device->pnp.bus_id, "ECDT");
break;
default:
acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
@@ -1173,7 +1205,7 @@ static void acpi_device_get_busid(struct acpi_device *device)
else
break;
}
- strcpy(device->pnp.bus_id, bus_id);
+ strscpy(device->pnp.bus_id, bus_id);
break;
}
}
@@ -1424,8 +1456,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
acpi_object_is_system_bus(handle)) {
/* \_SB, \_TZ, LNXSYBUS */
acpi_add_id(pnp, ACPI_BUS_HID);
- strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
- strcpy(pnp->device_class, ACPI_BUS_CLASS);
+ strscpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
+ strscpy(pnp->device_class, ACPI_BUS_CLASS);
}
break;
@@ -1577,70 +1609,44 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
#ifdef CONFIG_IOMMU_API
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *fwnode)
{
int ret;
- ret = iommu_fwspec_init(dev, fwnode, ops);
+ ret = iommu_fwspec_init(dev, fwnode);
if (ret)
return ret;
return iommu_fwspec_add_ids(dev, &id, 1);
}
-static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- return fwspec ? fwspec->ops : NULL;
-}
-
static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
{
int err;
- const struct iommu_ops *ops;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
- /*
- * If we already translated the fwspec there is nothing left to do,
- * return the iommu_ops.
- */
- ops = acpi_iommu_fwspec_ops(dev);
- if (ops) {
+ /* If we already translated the fwspec there is nothing left to do */
+ if (dev_iommu_fwspec_get(dev)) {
mutex_unlock(&iommu_probe_device_lock);
return 0;
}
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
+ err = rimt_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
- mutex_unlock(&iommu_probe_device_lock);
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * iommu_probe_device() call for dev, replay it to get things in order.
- */
- if (!err && dev->bus)
- err = iommu_probe_device(dev);
+ mutex_unlock(&iommu_probe_device_lock);
- if (err == -EPROBE_DEFER)
- return err;
- if (err) {
- dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
- return err;
- }
- if (!acpi_iommu_fwspec_ops(dev))
- return -ENODEV;
- return 0;
+ return err;
}
#else /* !CONFIG_IOMMU_API */
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *fwnode)
{
return -ENODEV;
}
@@ -1674,6 +1680,8 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
ret = acpi_iommu_configure_id(dev, input_id);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
+ if (ret)
+ dev_dbg(dev, "Adding to IOMMU failed: %d\n", ret);
arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
@@ -1760,6 +1768,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"CSC3557", },
{"INT33FE", },
{"INT3515", },
+ {"TXNW2781", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
@@ -1813,6 +1822,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->dev.parent = parent ? &parent->dev : NULL;
device->dev.release = release;
device->dev.bus = &acpi_bus_type;
+ device->dev.groups = acpi_groups;
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, ACPI_STA_DEFAULT);
acpi_device_get_busid(device);
@@ -2004,6 +2014,49 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
mutex_unlock(&acpi_scan_lock);
}
+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices)
+{
+ u32 count;
+ int i;
+
+ for (count = 0, i = 0; i < dep_devices->count; i++) {
+ struct acpi_device_info *info;
+ struct acpi_dep_data *dep;
+ bool skip, honor_dep;
+ acpi_status status;
+
+ status = acpi_get_object_info(dep_devices->handles[i], &info);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Error reading _DEP device info\n");
+ continue;
+ }
+
+ skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
+ honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
+ kfree(info);
+
+ if (skip)
+ continue;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep)
+ continue;
+
+ count++;
+
+ dep->supplier = dep_devices->handles[i];
+ dep->consumer = handle;
+ dep->honor_dep = honor_dep;
+
+ mutex_lock(&acpi_dep_list_lock);
+ list_add_tail(&dep->node, &acpi_dep_list);
+ mutex_unlock(&acpi_dep_list_lock);
+ }
+
+ acpi_handle_list_free(dep_devices);
+ return count;
+}
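+
+/*
+ * Note for callers (illustrative): this function consumes @dep_devices,
+ * freeing the handle list before returning, so a caller builds a fresh
+ * list for each call, as the RISC-V arch_acpi_add_auto_dep() path does.
+ */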
+
static void acpi_scan_init_hotplug(struct acpi_device *adev)
{
struct acpi_hardware_id *hwid;
@@ -2023,11 +2076,21 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
}
}
+u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; }
+
static u32 acpi_scan_check_dep(acpi_handle handle)
{
struct acpi_handle_list dep_devices;
- u32 count;
- int i;
+ u32 count = 0;
+
+ /*
+ * Some architectures, such as RISC-V, need to add a dependency on
+ * the interrupt controller for every device that uses a GSI, so
+ * that the interrupt controller is probed before any of those
+ * devices. Instead of mandating _DEP on all such devices, detect
+ * the dependency and add it automatically.
+ */
+ count += arch_acpi_add_auto_dep(handle);
/*
* Check for _HID here to avoid deferring the enumeration of:
@@ -2036,48 +2099,14 @@ static u32 acpi_scan_check_dep(acpi_handle handle)
* Still, checking for _HID catches more than just these cases ...
*/
if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID"))
- return 0;
+ return count;
if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) {
acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
- return 0;
- }
-
- for (count = 0, i = 0; i < dep_devices.count; i++) {
- struct acpi_device_info *info;
- struct acpi_dep_data *dep;
- bool skip, honor_dep;
- acpi_status status;
-
- status = acpi_get_object_info(dep_devices.handles[i], &info);
- if (ACPI_FAILURE(status)) {
- acpi_handle_debug(handle, "Error reading _DEP device info\n");
- continue;
- }
-
- skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
- honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
- kfree(info);
-
- if (skip)
- continue;
-
- dep = kzalloc(sizeof(*dep), GFP_KERNEL);
- if (!dep)
- continue;
-
- count++;
-
- dep->supplier = dep_devices.handles[i];
- dep->consumer = handle;
- dep->honor_dep = honor_dep;
-
- mutex_lock(&acpi_dep_list_lock);
- list_add_tail(&dep->node , &acpi_dep_list);
- mutex_unlock(&acpi_dep_list_lock);
+ return count;
}
- acpi_handle_list_free(&dep_devices);
+ count += acpi_scan_add_dep(handle, &dep_devices);
return count;
}
@@ -2264,6 +2293,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
if (device->handler)
goto ok;
+ acpi_ec_register_opregions(device);
+
if (!device->flags.initialized) {
device->flags.power_manageable =
device->power.states[ACPI_STATE_D0].flags.valid;
@@ -2366,7 +2397,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
* initial enumeration of devices is complete, put it into the unbound
* workqueue.
*/
- queue_work(system_unbound_wq, &cdw->work);
+ queue_work(system_dfl_wq, &cdw->work);
return true;
}
@@ -2596,7 +2627,9 @@ EXPORT_SYMBOL(acpi_bus_scan);
*/
void acpi_bus_trim(struct acpi_device *adev)
{
- acpi_scan_check_and_detach(adev, NULL);
+ uintptr_t flags = 0;
+
+ acpi_scan_check_and_detach(adev, (void *)flags);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
@@ -2677,7 +2710,7 @@ void __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
- acpi_int340x_thermal_init();
+ acpi_power_resources_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
@@ -2744,6 +2777,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header,
return 0;
}
+void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { }
+
int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
{
int count = 0;
@@ -2752,6 +2787,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
return 0;
mutex_lock(&acpi_probe_mutex);
+ arch_sort_irqchip_probe(ap_head, nr);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 889f1c1a1fa9..66ec81e306d4 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -352,6 +352,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
},
},
/*
+ * The ASUS ROG M16 from 2023 has many events which wake it from s2idle,
+ * resulting in excessive battery drain and a risk of the laptop
+ * overheating; these events can be caused by the MMC or the AniMe
+ * display, if installed. The match is valid for all of the GU604V<x>
+ * range.
+ */
+ {
+ .callback = init_default_s3,
+ .ident = "ASUS ROG Zephyrus M16 (2023)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"),
+ },
+ },
+ /*
* https://bugzilla.kernel.org/show_bug.cgi?id=189431
* Lenovo G50-45 is a platform later than 2012, but needs nvs memory
* saving during S3.
@@ -628,7 +642,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/*
* Disable all GPE and clear their status bits before interrupts are
* enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
- * prevent them from producing spurious interrups.
+ * prevent them from producing spurious interrupts.
*
* acpi_leave_sleep_state() will reenable specific GPEs later.
*
@@ -870,13 +884,13 @@ bool acpi_s2idle_wakeup(void)
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
-static int acpi_save_bm_rld(void)
+static int acpi_save_bm_rld(void *data)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
return 0;
}
-static void acpi_restore_bm_rld(void)
+static void acpi_restore_bm_rld(void *data)
{
u32 resumed_bm_rld = 0;
@@ -887,14 +901,18 @@ static void acpi_restore_bm_rld(void)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-static struct syscore_ops acpi_sleep_syscore_ops = {
+static const struct syscore_ops acpi_sleep_syscore_ops = {
.suspend = acpi_save_bm_rld,
.resume = acpi_restore_bm_rld,
};
+static struct syscore acpi_sleep_syscore = {
+ .ops = &acpi_sleep_syscore_ops,
+};
+
static void acpi_sleep_syscore_init(void)
{
- register_syscore_ops(&acpi_sleep_syscore_ops);
+ register_syscore(&acpi_sleep_syscore);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index d960a238be4e..9c3cb109c5d2 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -17,10 +17,7 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
extern int acpi_s2idle_begin(void);
extern int acpi_s2idle_prepare(void);
-extern int acpi_s2idle_prepare_late(void);
-extern void acpi_s2idle_check(void);
extern bool acpi_s2idle_wake(void);
-extern void acpi_s2idle_restore_early(void);
extern void acpi_s2idle_restore(void);
extern void acpi_s2idle_end(void);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index cd36a97b0ea2..73cb933fdc89 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -141,12 +141,23 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
+ case ACPI_DBG2_RISCV_SBI_CON:
+ uart = "sbi";
+ break;
default:
err = -ENOENT;
goto done;
}
- switch (table->baud_rate) {
+ /*
+ * SPCR 1.09 defines the Precise Baud Rate field, which contains a
+ * specific non-zero baud rate that overrides the value of the
+ * Configured Baud Rate field. If this field is zero or not present,
+ * the Configured Baud Rate is used.
+ */
+ if (table->header.revision >= 4 && table->precise_baudrate)
+ baud_rate = table->precise_baudrate;
+ else switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 687524b50085..e596224302f4 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -319,7 +319,7 @@ struct acpi_data_attr {
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
@@ -412,7 +412,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
}
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index b976e5fc3fbc..4286e4af1092 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -56,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_apic *)header;
pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -66,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_x2apic *)header;
pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -160,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_sapic *)header;
pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -183,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
p->uid, p->base_address,
p->arm_mpidr,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -198,13 +198,27 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
break;
+ case ACPI_MADT_TYPE_MULTIPROC_WAKEUP:
+ {
+ struct acpi_madt_multiproc_wakeup *p =
+ (struct acpi_madt_multiproc_wakeup *)header;
+ u64 reset_vector = 0;
+
+ if (p->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1)
+ reset_vector = p->reset_vector;
+
+ pr_debug("MP Wakeup (version[%d], mailbox[%#llx], reset[%#llx])\n",
+ p->version, p->mailbox_address, reset_vector);
+ }
+ break;
+
case ACPI_MADT_TYPE_CORE_PIC:
{
struct acpi_madt_core_pic *p = (struct acpi_madt_core_pic *)header;
pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
p->processor_id, p->core_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -214,7 +228,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
p->uid, p->hart_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -382,7 +396,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
@@ -394,7 +408,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
- ACPI_SIG_NBFT };
+ ACPI_SIG_NBFT, ACPI_SIG_SWFT, ACPI_SIG_MPAM};
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
@@ -705,8 +719,12 @@ int __init acpi_locate_initial_tables(void)
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_warn("Failed to initialize tables, status=0x%x (%s)", status, msg);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d67881b50bca..a511f9ea0267 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
+ int temp;
+
if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;
- return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
+ if (temp <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ return temp;
}
static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
@@ -552,77 +558,31 @@ static void acpi_thermal_zone_device_critical(struct thermal_zone_device *therma
thermal_zone_device_critical(thermal);
}
-struct acpi_thermal_bind_data {
- struct thermal_zone_device *thermal;
- struct thermal_cooling_device *cdev;
- bool bind;
-};
-
-static int bind_unbind_cdev_cb(struct thermal_trip *trip, void *arg)
+static bool acpi_thermal_should_bind_cdev(struct thermal_zone_device *thermal,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct acpi_thermal_trip *acpi_trip = trip->priv;
- struct acpi_thermal_bind_data *bd = arg;
- struct thermal_zone_device *thermal = bd->thermal;
- struct thermal_cooling_device *cdev = bd->cdev;
struct acpi_device *cdev_adev = cdev->devdata;
int i;
/* Skip critical and hot trips. */
if (!acpi_trip)
- return 0;
+ return false;
for (i = 0; i < acpi_trip->devices.count; i++) {
acpi_handle handle = acpi_trip->devices.handles[i];
- struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
-
- if (adev != cdev_adev)
- continue;
-
- if (bd->bind) {
- int ret;
-
- ret = thermal_bind_cdev_to_trip(thermal, trip, cdev,
- THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
- if (ret)
- return ret;
- } else {
- thermal_unbind_cdev_from_trip(thermal, trip, cdev);
- }
- }
-
- return 0;
-}
-static int acpi_thermal_bind_unbind_cdev(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev,
- bool bind)
-{
- struct acpi_thermal_bind_data bd = {
- .thermal = thermal, .cdev = cdev, .bind = bind
- };
-
- return for_each_thermal_trip(thermal, bind_unbind_cdev_cb, &bd);
-}
-
-static int
-acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- return acpi_thermal_bind_unbind_cdev(thermal, cdev, true);
-}
+ if (acpi_fetch_acpi_dev(handle) == cdev_adev)
+ return true;
+ }
-static int
-acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- return acpi_thermal_bind_unbind_cdev(thermal, cdev, false);
+ return false;
}
static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
- .bind = acpi_thermal_bind_cooling_device,
- .unbind = acpi_thermal_unbind_cooling_device,
+ .should_bind = acpi_thermal_should_bind_cdev,
.get_temp = thermal_get_temp,
.get_trend = thermal_get_trend,
.hot = acpi_thermal_zone_device_hot,
@@ -836,14 +796,20 @@ static int acpi_thermal_add(struct acpi_device *device)
return -ENOMEM;
tz->device = device;
- strcpy(tz->name, device->pnp.bus_id);
- strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
+ strscpy(tz->name, device->pnp.bus_id);
+ strscpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
device->driver_data = tz;
acpi_thermal_aml_dependency_fix(tz);
- /* Get trip points [_CRT, _PSV, etc.] (required). */
+ /*
+ * Set the cooling mode [_SCP] to active cooling. This needs to happen before
+ * we retrieve the trip point values.
+ */
+ acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE);
+
+ /* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
crit_temp = acpi_thermal_get_critical_trip(tz);
@@ -854,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- /* Set the cooling mode [_SCP] to active cooling. */
- acpi_execute_simple_method(tz->device->handle, "_SCP",
- ACPI_THERMAL_MODE_ACTIVE);
-
/* Determine the default polling frequency [_TZP]. */
if (tzp)
tz->polling_frequency = tzp;
@@ -962,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state;
+ int i, j;
if (!dev)
return -EINVAL;
@@ -977,10 +939,8 @@ static int acpi_thermal_resume(struct device *dev)
if (!acpi_thermal_trip_valid(acpi_trip))
break;
- for (j = 0; j < acpi_trip->devices.count; j++) {
- acpi_bus_update_power(acpi_trip->devices.handles[j],
- &power_state);
- }
+ for (j = 0; j < acpi_trip->devices.count; j++)
+ acpi_bus_update_power(acpi_trip->devices.handles[j], NULL);
}
acpi_queue_thermal_check(tz);
@@ -1100,7 +1060,8 @@ static int __init acpi_thermal_init(void)
}
acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
- WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!acpi_thermal_pm_queue)
return -ENODEV;
@@ -1122,7 +1083,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
-MODULE_IMPORT_NS(ACPI_THERMAL);
+MODULE_IMPORT_NS("ACPI_THERMAL");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/thermal_lib.c b/drivers/acpi/thermal_lib.c
index 6214d6ebe1fa..f81591927e86 100644
--- a/drivers/acpi/thermal_lib.c
+++ b/drivers/acpi/thermal_lib.c
@@ -53,25 +53,25 @@ int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
return acpi_trip_temp(adev, obj_name, ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, "ACPI_THERMAL");
int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_PSV", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, "ACPI_THERMAL");
int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_HOT", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, "ACPI_THERMAL");
int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_CRT", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, "ACPI_THERMAL");
static int thermal_temp(int error, int temp_decik, int *ret_temp)
{
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 202234ba54bd..526563a0d188 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -277,15 +277,25 @@ acpi_evaluate_integer(acpi_handle handle,
EXPORT_SYMBOL(acpi_evaluate_integer);
-int acpi_get_local_address(acpi_handle handle, u32 *addr)
+int acpi_get_local_u64_address(acpi_handle handle, u64 *addr)
{
- unsigned long long adr;
acpi_status status;
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, addr);
if (ACPI_FAILURE(status))
return -ENODATA;
+ return 0;
+}
+EXPORT_SYMBOL(acpi_get_local_u64_address);
+
+int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ u64 adr;
+ int ret;
+ ret = acpi_get_local_u64_address(handle, &adr);
+ if (ret < 0)
+ return ret;
*addr = (u32)adr;
return 0;
}
@@ -484,7 +494,7 @@ bool acpi_device_dep(acpi_handle target, acpi_handle match)
}
EXPORT_SYMBOL_GPL(acpi_device_dep);
-acpi_status
+bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
@@ -492,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
union acpi_object *output;
status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);
-
if (ACPI_FAILURE(status))
- return status;
+ return false;
output = buffer.pointer;
@@ -513,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
out:
kfree(buffer.pointer);
- return status;
+ return ACPI_SUCCESS(status);
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
@@ -791,7 +800,8 @@ acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
if (ret != AE_NOT_FOUND)
acpi_handle_warn(handle,
- "failed to evaluate _DSM %pUb (0x%x)\n", guid, ret);
+ "failed to evaluate _DSM %pUb rev:%lld func:%lld (0x%x)\n",
+ guid, rev, func, ret);
return NULL;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 2cc3821b2b16..4cf74f173c78 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -54,6 +54,8 @@ static void acpi_video_parse_cmdline(void)
acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
if (!strcmp("apple_gmux", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_apple_gmux;
+ if (!strcmp("dell_uart", acpi_video_backlight_string))
+ acpi_backlight_cmdline = acpi_backlight_dell_uart;
if (!strcmp("none", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_none;
}
@@ -252,6 +254,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ /* Panasonic Toughbook CF-18 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita Electric Industrial"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"),
+ },
+ },
/*
* Toshiba models with Transflective display, these need to use
@@ -540,6 +550,38 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Air 7,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Apple MacBook Air 9,1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 9,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 11,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
@@ -550,6 +592,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Apple MacBook Pro 16,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Dell Inspiron N4010 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -806,6 +856,30 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
/*
+ * Dell AIO (All-in-One) models which advertise a UART-attached
+ * backlight controller board in their ACPI tables (and may even have
+ * one), but which need native backlight control nevertheless.
+ */
+ {
+ /* https://github.com/zabbly/linux/issues/26 */
+ .callback = video_detect_force_native,
+ /* Dell OptiPlex 5480 AIO */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 5480 AIO"),
+ },
+ },
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=2303936 */
+ .callback = video_detect_force_native,
+ /* Dell OptiPlex 7760 AIO */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7760 AIO"),
+ },
+ },
+
+ /*
* Models which have nvidia-ec-wmi support, but should not use it.
* Note this indicates a likely firmware bug on these models and should
* be revisited if/when Linux gets support for dynamic mux mode.
@@ -863,7 +937,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
/* Lenovo Yoga Tab 3 Pro YT3-X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
},
@@ -875,6 +948,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82K8"),
+ },
+ },
{ },
};
@@ -902,6 +983,7 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
static bool apple_gmux_present;
+ static bool dell_uart_present;
static bool native_available;
static bool init_done;
static long video_caps;
@@ -916,6 +998,7 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
&video_caps, NULL);
nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
apple_gmux_present = apple_gmux_detect(NULL, NULL);
+ dell_uart_present = acpi_dev_present("DELL0501", NULL, -1);
init_done = true;
}
if (native)
@@ -946,6 +1029,9 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
if (apple_gmux_present)
return acpi_backlight_apple_gmux;
+ if (dell_uart_present)
+ return acpi_backlight_dell_uart;
+
/* Use ACPI video if available, except when native should be preferred. */
if ((video_caps & ACPI_VIDEO_BACKLIGHT) &&
!(native_available && prefer_native_over_acpi_video()))
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index c8025921c129..c13a20365c2c 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,11 +19,11 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct viot_iommu {
/* Node offset within the table */
@@ -307,21 +307,14 @@ void __init acpi_viot_init(void)
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
u32 epid)
{
- const struct iommu_ops *ops;
-
- if (!viommu)
+ if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU))
return -ENODEV;
/* We're not translating ourself */
if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
- ops = iommu_ops_from_fwnode(viommu->fwnode);
- if (!ops)
- return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
- -EPROBE_DEFER : -ENODEV;
-
- return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
+ return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode);
}
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index b02bf770aead..ff6dc957bc11 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -42,7 +42,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
@@ -67,7 +67,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c
index 148e29c2c526..1dcb80ab0d23 100644
--- a/drivers/acpi/x86/lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -181,7 +181,7 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
acpi_status status;
u64 uid;
- /* Expected to always be successfull, but better safe then sorry */
+ /* Expected to always be successful, but better safe than sorry */
if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
/* Detect I2C bus shared with PUNIT and ignore its d3 status */
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
@@ -338,8 +338,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
};
static const struct x86_cpu_id lpss_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL),
{}
};
@@ -387,9 +387,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
- /* Wildcat Point LPSS devices */
- { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
-
{ }
};
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index dd0b40b9bbe8..6d4d06236f61 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -299,34 +299,13 @@ free_acpi_buffer:
ACPI_FREE(out_obj);
}
-/**
- * acpi_get_lps0_constraint - Get the LPS0 constraint for a device.
- * @adev: Device to get the constraint for.
- *
- * The LPS0 constraint is the shallowest (minimum) power state in which the
- * device can be so as to allow the platform as a whole to achieve additional
- * energy conservation by utilizing a system-wide low-power state.
- *
- * Returns:
- * - ACPI power state value of the constraint for @adev on success.
- * - Otherwise, ACPI_STATE_UNKNOWN.
- */
-int acpi_get_lps0_constraint(struct acpi_device *adev)
-{
- struct lpi_constraints *entry;
-
- for_each_lpi_constraint(entry) {
- if (adev->handle == entry->handle)
- return entry->min_dstate;
- }
-
- return ACPI_STATE_UNKNOWN;
-}
-
static void lpi_check_constraints(void)
{
struct lpi_constraints *entry;
+ if (IS_ERR_OR_NULL(lpi_constraints_table))
+ return;
+
for_each_lpi_constraint(entry) {
struct acpi_device *adev = acpi_fetch_acpi_dev(entry->handle);
@@ -508,11 +487,6 @@ static int lps0_device_attach(struct acpi_device *adev,
lps0_device_handle = adev->handle;
- if (acpi_s2idle_vendor_amd())
- lpi_device_get_constraints_amd();
- else
- lpi_device_get_constraints();
-
/*
* Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in
* the FADT and the default suspend mode was not set from the command
@@ -539,7 +513,26 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
-int acpi_s2idle_prepare_late(void)
+static int acpi_s2idle_begin_lps0(void)
+{
+ if (pm_debug_messages_on && !lpi_constraints_table) {
+ if (acpi_s2idle_vendor_amd())
+ lpi_device_get_constraints_amd();
+ else
+ lpi_device_get_constraints();
+
+ /*
+ * Try to retrieve the constraints only once because failures
+ * to do so usually are sticky.
+ */
+ if (!lpi_constraints_table)
+ lpi_constraints_table = ERR_PTR(-ENODATA);
+ }
+
+ return acpi_s2idle_begin();
+}
+
+static int acpi_s2idle_prepare_late_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -585,7 +578,7 @@ int acpi_s2idle_prepare_late(void)
return 0;
}
-void acpi_s2idle_check(void)
+static void acpi_s2idle_check_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -598,7 +591,7 @@ void acpi_s2idle_check(void)
}
}
-void acpi_s2idle_restore_early(void)
+static void acpi_s2idle_restore_early_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -636,12 +629,12 @@ void acpi_s2idle_restore_early(void)
}
static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
- .begin = acpi_s2idle_begin,
+ .begin = acpi_s2idle_begin_lps0,
.prepare = acpi_s2idle_prepare,
- .prepare_late = acpi_s2idle_prepare_late,
- .check = acpi_s2idle_check,
+ .prepare_late = acpi_s2idle_prepare_late_lps0,
+ .check = acpi_s2idle_check_lps0,
.wake = acpi_s2idle_wake,
- .restore_early = acpi_s2idle_restore_early,
+ .restore_early = acpi_s2idle_restore_early_lps0,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
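
An aside on the constraints handling above: this is the common once-only initialization idiom, where a NULL pointer means "not tried yet" and an error-encoded pointer makes failure sticky so the fetch is never retried. A minimal userspace sketch of the idiom follows; the names are illustrative, and ERR_PTR/IS_ERR_OR_NULL are re-derived here rather than taken from the kernel headers.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR_OR_NULL(p) \
	((p) == NULL || (uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

static int *constraints_table;		/* NULL: not fetched yet */

static int *fetch_constraints(void)
{
	return NULL;			/* stand-in: platform has none */
}

static void ensure_constraints(void)
{
	if (constraints_table)		/* loaded, or sticky failure */
		return;
	constraints_table = fetch_constraints();
	if (!constraints_table)
		constraints_table = ERR_PTR(-ENODATA);	/* never retried */
}

int main(void)
{
	ensure_constraints();
	ensure_constraints();		/* second call is a no-op */
	printf("usable: %s\n",
	       IS_ERR_OR_NULL(constraints_table) ? "no" : "yes");
	return 0;
}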
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 7dca73417e2b..4ee30c2897a2 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -45,37 +46,37 @@ struct override_status_id {
unsigned long long status;
};
-#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \
+#define ENTRY(status, hid, uid, path, cpu_vfm, dmi...) { \
{ { hid, }, {} }, \
- { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \
+ { X86_MATCH_VFM(cpu_vfm, NULL), {} }, \
{ { .matches = dmi }, {} }, \
uid, \
path, \
status, \
}
-#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
- ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi)
+#define PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \
+ ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_vfm, dmi)
-#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
- ENTRY(0, hid, uid, NULL, cpu_model, dmi)
+#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \
+ ENTRY(0, hid, uid, NULL, cpu_vfm, dmi)
-#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
- ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi)
+#define PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \
+ ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_vfm, dmi)
-#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
- ENTRY(0, "", NULL, path, cpu_model, dmi)
+#define NOT_PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \
+ ENTRY(0, "", NULL, path, cpu_vfm, dmi)
static const struct override_status_id override_status_ids[] = {
/*
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}),
- PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}),
+ PRESENT_ENTRY_HID("80860F09", "1", INTEL_ATOM_SILVERMONT, {}),
+ PRESENT_ENTRY_HID("80862288", "1", INTEL_ATOM_AIRMONT, {}),
/* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */
- PRESENT_ENTRY_HID("80862289", "2", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("80862289", "2", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
}),
@@ -84,18 +85,18 @@ static const struct override_status_id override_status_ids[] = {
* The INT0002 device is necessary to clear wakeup interrupt sources
* on Cherry Trail devices, without it we get "nobody cared" IRQ msgs.
*/
- PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}),
+ PRESENT_ENTRY_HID("INT0002", "1", INTEL_ATOM_AIRMONT, {}),
/*
* On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
* the touchscreen ACPI device until a certain time has passed
* after _SB.PCI0.GFX0.LCD.LCD1._ON gets called *and* _STA has
* been called at least 3 times since.
*/
- PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
+ PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
- PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
+ PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
}),
@@ -104,7 +105,7 @@ static const struct override_status_id override_status_ids[] = {
* The Dell XPS 15 9550 has a SMO8110 accelerometer /
* HDD freefall sensor which is wrongly marked as not present.
*/
- PRESENT_ENTRY_HID("SMO8810", "1", SKYLAKE, {
+ PRESENT_ENTRY_HID("SMO8810", "1", INTEL_SKYLAKE, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"),
}),
@@ -121,19 +122,19 @@ static const struct override_status_id override_status_ids[] = {
* was copy-pasted from the GPD win, so it has a disabled KIOX000A
* node which we should not enable, thus we also check the BIOS date.
*/
- PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
}),
- PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
}),
- PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
@@ -146,7 +147,7 @@ static const struct override_status_id override_status_ids[] = {
* method sets a GPIO causing the PCI wifi card to turn off.
* See above remark about uniqueness of the DMI match.
*/
- NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, {
+ NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", INTEL_ATOM_AIRMONT, {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
@@ -158,7 +159,7 @@ static const struct override_status_id override_status_ids[] = {
* as both ACCL0001 and MAGN0001. As we can only ever register an
* i2c client for one of them, ignore MAGN0001.
*/
- NOT_PRESENT_ENTRY_HID("MAGN0001", "1", ATOM_SILVERMONT, {
+ NOT_PRESENT_ENTRY_HID("MAGN0001", "1", INTEL_ATOM_SILVERMONT, {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"),
}),
@@ -206,16 +207,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}
/*
- * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
- * but this property was introduced after many of these systems launched
- * and most OEM systems don't have it in their BIOS.
+ * but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
- * a hardcoded allowlist for D3 support, which was used for these platforms.
+ * a hardcoded allowlist for D3 support as well as a registry key to override
+ * the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@@ -228,19 +229,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
- disk in the system.
+ * disk in the system.
+ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
+ * NVME device.
*/
-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
- {}
-};
-
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
+ return false;
+ return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}
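
The force_storage_d3() rewrite above trades a per-model allowlist for a capability test, so new parts are covered without table churn. A rough userspace sketch of the difference; the family/model values come from the removed table, while the "newer part" numbers are purely illustrative.

#include <stdbool.h>
#include <stdio.h>

struct cpu { int family, model; bool zen; };
struct fw { bool low_power_s0; };

/* old style: every new model needs a table entry */
static bool d3_by_allowlist(const struct cpu *c)
{
	static const struct { int family, model; } list[] = {
		{ 23, 24 },	/* Picasso */
		{ 23, 96 },	/* Renoir */
		{ 23, 104 },	/* Lucienne */
		{ 25, 80 },	/* Cezanne */
	};
	for (unsigned int i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		if (c->family == list[i].family && c->model == list[i].model)
			return true;
	return false;
}

/* new style: one capability check, no table to maintain */
static bool d3_by_capability(const struct cpu *c, const struct fw *f)
{
	return c->zen && f->low_power_s0;
}

int main(void)
{
	struct cpu newer_part = { .family = 25, .model = 64, .zen = true };
	struct fw fadt = { .low_power_s0 = true };

	printf("allowlist: %d, capability: %d\n",
	       d3_by_allowlist(&newer_part),
	       d3_by_capability(&newer_part, &fadt));
	return 0;
}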
/*
@@ -299,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
* 2. Devices which also have the skip i2c/serdev quirks and which
* need the x86-android-tablets module to properly work.
+ * Sorted alphabetically.
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
@@ -312,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Acer Iconia One 8 A1-840 (non FHD version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Asus ME176C tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
@@ -322,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
- /* Lenovo Yoga Book X90F/L */
+ /* Asus TF103C transformer 2-in-1 */
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Lenovo Yoga Book X90F/L */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
@@ -359,11 +371,11 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/* Lenovo Yoga Tab 3 Pro X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Medion Lifetab S10346 */
@@ -397,6 +409,32 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -416,6 +454,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
{ "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
{ "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
+ { "INT33F5", 0 }, /* TI Dollar Cove PMIC */
{ "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
{ "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */
{ "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
@@ -444,18 +483,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
- u64 uid;
- int ret;
+ u64 uid = 0;
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret)
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
return 0;
- dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
- if (dmi_id)
- quirks = (unsigned long)dmi_id->driver_data;
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ /* uid is left at 0 on errors and 0 is not a valid UART UID */
+ acpi_dev_uid_to_integer(adev, &uid);
+
+ /* For PCI UARTs without a UID */
+ if (!uid && dev_is_pci(controller_parent)) {
+ struct pci_dev *pdev = to_pci_dev(controller_parent);
+
+ /*
+ * Devfn values for PCI UARTs on Bay Trail SoCs, which are
+ * the only devices where this fallback is necessary.
+ */
+ if (pdev->devfn == PCI_DEVFN(0x1e, 3))
+ uid = 1;
+ else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
+ uid = 2;
+ }
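
The devfn fallback above relies on PCI's slot/function packing: bits 7..3 carry the device (slot) number and bits 2..0 the function, so 00:1e.3 and 00:1e.4 map to distinct devfn values. A small standalone check of that encoding, with the macros reproduced to match their kernel definitions:

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	unsigned int uart1 = PCI_DEVFN(0x1e, 3);	/* Bay Trail UART #1 */
	unsigned int uart2 = PCI_DEVFN(0x1e, 4);	/* Bay Trail UART #2 */

	printf("uart1 devfn=0x%02x (%02x.%u)\n",
	       uart1, PCI_SLOT(uart1), PCI_FUNC(uart1));
	printf("uart2 devfn=0x%02x (%02x.%u)\n",
	       uart2, PCI_SLOT(uart2), PCI_FUNC(uart2));
	return 0;
}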
+
+ if (!uid)
+ return 0;
- if (!dev_is_platform(controller_parent)) {
+ if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
/* PNP enumerated UARTs */
if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
*skip = true;
@@ -510,7 +566,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
* Set skip to true so that the tty core creates a serdev ctrl device.
* The backlight driver will manually create the serdev client device.
*/
- if (acpi_dev_hid_match(adev, "DELL0501")) {
+ if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
*skip = true;
/*
* Create a platform dev for dell-uart-backlight to bind to.
diff --git a/drivers/amba/Kconfig b/drivers/amba/Kconfig
index fb6c7e0b4cce..14bb61ff801e 100644
--- a/drivers/amba/Kconfig
+++ b/drivers/amba/Kconfig
@@ -5,7 +5,7 @@ config ARM_AMBA
if ARM_AMBA
config TEGRA_AHB
- bool
+ bool "Enable AHB driver for NVIDIA Tegra SoCs" if COMPILE_TEST
default y if ARCH_TEGRA
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index aba3aa95b224..952c45ca6e48 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -26,7 +26,7 @@
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
-#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
+#define to_amba_driver(d) container_of_const(d, struct amba_driver, drv)
/* called on periphid match and class 0x9 coresight device. */
static int
@@ -138,7 +138,7 @@ static int amba_read_periphid(struct amba_device *dev)
void __iomem *tmp;
int i, ret;
- ret = dev_pm_domain_attach(&dev->dev, true);
+ ret = dev_pm_domain_attach(&dev->dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
goto err_out;
@@ -205,10 +205,10 @@ err_out:
return ret;
}
-static int amba_match(struct device *dev, struct device_driver *drv)
+static int amba_match(struct device *dev, const struct device_driver *drv)
{
struct amba_device *pcdev = to_amba_device(dev);
- struct amba_driver *pcdrv = to_amba_driver(drv);
+ const struct amba_driver *pcdrv = to_amba_driver(drv);
mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
@@ -291,15 +291,14 @@ static int amba_probe(struct device *dev)
if (ret < 0)
break;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
break;
ret = amba_get_enable_pclk(pcdev);
- if (ret) {
- dev_pm_domain_detach(dev, true);
+ if (ret)
break;
- }
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
@@ -314,7 +313,6 @@ static int amba_probe(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- dev_pm_domain_detach(dev, true);
} while (0);
return ret;
@@ -336,7 +334,6 @@ static void amba_remove(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- dev_pm_domain_detach(dev, true);
}
static void amba_shutdown(struct device *dev)
@@ -364,7 +361,8 @@ static int amba_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
- if (!ret && !drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -435,7 +433,7 @@ static const struct dev_pm_ops amba_pm = {
* DMA configuration for platform and AMBA bus is same. So here we reuse
* platform's DMA config routine.
*/
-struct bus_type amba_bustype = {
+const struct bus_type amba_bustype = {
.name = "amba",
.dev_groups = amba_dev_groups,
.match = amba_match,
@@ -449,6 +447,12 @@ struct bus_type amba_bustype = {
};
EXPORT_SYMBOL_GPL(amba_bustype);
+bool dev_is_amba(const struct device *dev)
+{
+ return dev->bus == &amba_bustype;
+}
+EXPORT_SYMBOL_GPL(dev_is_amba);
+
static int __init amba_init(void)
{
return bus_register(&amba_bustype);
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index c0e8b765522d..f23c3ed01810 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -144,6 +144,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
+ put_device(dev);
val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 07aa8ae0a058..e2e402c9d175 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -4,6 +4,7 @@ menu "Android"
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ depends on NET
default n
help
Binder is used in Android for both communication between processes,
@@ -13,6 +14,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Rust version of Android Binder IPC Driver"
+ depends on RUST && MMU && !ANDROID_BINDER_IPC
+ help
+ This enables the Rust implementation of the Binder driver.
+
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
@@ -27,7 +41,7 @@ config ANDROID_BINDERFS
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
- depends on ANDROID_BINDER_IPC
+ depends on ANDROID_BINDER_IPC || ANDROID_BINDER_IPC_RUST
default "binder,hwbinder,vndbinder"
help
Default value for the binder.devices parameter.
@@ -37,14 +51,15 @@ config ANDROID_BINDER_DEVICES
created. Each binder device has its own context manager, and is
therefore logically separated from the other devices.
-config ANDROID_BINDER_IPC_SELFTEST
- bool "Android Binder IPC Driver Selftest"
- depends on ANDROID_BINDER_IPC
+config ANDROID_BINDER_ALLOC_KUNIT_TEST
+ tristate "KUnit Tests for Android Binder Alloc" if !KUNIT_ALL_TESTS
+ depends on ANDROID_BINDER_IPC && KUNIT
+ default KUNIT_ALL_TESTS
help
- This feature allows binder selftest to run.
+ This feature builds the binder alloc KUnit tests.
- Binder selftest checks the allocation and free of binder buffers
- exhaustively with combinations of various buffer sizes and
- alignments.
+ Each test case runs using a pared-down binder_alloc struct and
+ test-specific freelist, which allows this KUnit module to be loaded
+ for testing without interfering with a running system.
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c99c25..e0c650d3898e 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
-obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dd6923d37931..535fc881c8da 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -68,10 +68,13 @@
#include <linux/sizes.h>
#include <linux/ktime.h>
+#include <kunit/visibility.h>
+
#include <uapi/linux/android/binder.h>
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -79,6 +82,8 @@ static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
+static DEFINE_SPINLOCK(binder_devices_lock);
+
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);
@@ -277,7 +282,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
}
/**
- * binder_proc_unlock() - Release spinlock for given binder_proc
+ * binder_proc_unlock() - Release outer lock for given binder_proc
* @proc: struct binder_proc to release
*
* Release lock acquired via binder_proc_lock()
@@ -570,9 +575,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
return !thread->transaction_stack &&
- binder_worklist_empty_ilocked(&thread->todo) &&
- (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
- BINDER_LOOPER_STATE_REGISTERED));
+ binder_worklist_empty_ilocked(&thread->todo);
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
@@ -848,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -1045,6 +1039,63 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
return NULL;
}
+/* Find the smallest unused descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
+{
+ struct binder_ref *ref;
+ struct rb_node *n;
+ u32 desc;
+
+ desc = offset;
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->data.desc > desc)
+ break;
+ desc = ref->data.desc + 1;
+ }
+
+ return desc;
+}
+
+/*
+ * Find an available reference descriptor ID. The proc->outer_lock might
+ * be released in the process, in which case -EAGAIN is returned and the
+ * @desc should be considered invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+ struct binder_node *node,
+ u32 *desc)
+{
+ struct dbitmap *dmap = &proc->dmap;
+ unsigned int nbits, offset;
+ unsigned long *new, bit;
+
+ /* 0 is reserved for the context manager */
+ offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
+
+ if (!dbitmap_enabled(dmap)) {
+ *desc = slow_desc_lookup_olocked(proc, offset);
+ return 0;
+ }
+
+ if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
+ *desc = bit;
+ return 0;
+ }
+
+ /*
+ * The dbitmap is full and needs to grow. The proc->outer_lock
+ * is briefly released to allocate the new bitmap safely.
+ */
+ nbits = dbitmap_grow_nbits(dmap);
+ binder_proc_unlock(proc);
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_grow(dmap, new, nbits);
+
+ return -EAGAIN;
+}
+
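+
The -EAGAIN path in get_ref_desc_olocked() is the classic "drop the lock to allocate, re-take it, and make the caller retry" shape (dbitmap_shrink in try_shrink_dmap later in this patch follows the same shape). A compact pthread-based sketch, purely illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *bits;
static unsigned int nbits = 8;

/* called with the lock held; returns -EAGAIN after growing */
static int grow(void)
{
	unsigned int new_nbits = nbits * 2;
	unsigned char *new;

	pthread_mutex_unlock(&lock);	/* allocate without the lock */
	new = calloc(new_nbits, 1);
	pthread_mutex_lock(&lock);	/* re-acquire before touching state */

	if (!new)
		return -ENOMEM;
	memcpy(new, bits, nbits);
	free(bits);
	bits = new;
	nbits = new_nbits;
	return -EAGAIN;			/* state may have changed: retry */
}

int main(void)
{
	bits = calloc(nbits, 1);
	pthread_mutex_lock(&lock);
	while (nbits < 64 && grow() == -EAGAIN)
		;			/* caller retries the whole lookup */
	pthread_mutex_unlock(&lock);
	printf("grew to %u bits\n", nbits);
	free(bits);
	return 0;
}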
/**
* binder_get_ref_for_node_olocked() - get the ref associated with given node
* @proc: binder_proc that owns the ref
@@ -1068,12 +1119,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
struct binder_node *node,
struct binder_ref *new_ref)
{
- struct binder_context *context = proc->context;
- struct rb_node **p = &proc->refs_by_node.rb_node;
- struct rb_node *parent = NULL;
struct binder_ref *ref;
- struct rb_node *n;
+ struct rb_node *parent;
+ struct rb_node **p;
+ u32 desc;
+retry:
+ p = &proc->refs_by_node.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1141,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
if (!new_ref)
return NULL;
+ /* might release the proc->outer_lock */
+ if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+ goto retry;
+
binder_stats_created(BINDER_STAT_REF);
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
@@ -1095,14 +1152,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
- ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->data.desc > new_ref->data.desc)
- break;
- new_ref->data.desc = ref->data.desc + 1;
- }
-
+ new_ref->data.desc = desc;
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
@@ -1131,6 +1181,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ struct dbitmap *dmap = &ref->proc->dmap;
bool delete_node = false;
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1189,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
+ if (dbitmap_enabled(dmap))
+ dbitmap_clear_bit(dmap, ref->data.desc);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
@@ -1168,6 +1221,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
+
+ if (ref->freeze) {
+ binder_dequeue_work(ref->proc, &ref->freeze->work);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ }
+
binder_stats_deleted(BINDER_STAT_REF);
}
@@ -1295,9 +1354,29 @@ static void binder_free_ref(struct binder_ref *ref)
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
+ kfree(ref->freeze);
kfree(ref);
}
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+ unsigned long *new;
+ int nbits;
+
+ binder_proc_lock(proc);
+ nbits = dbitmap_shrink_nbits(&proc->dmap);
+ binder_proc_unlock(proc);
+
+ if (!nbits)
+ return;
+
+ new = bitmap_zalloc(nbits, GFP_KERNEL);
+ binder_proc_lock(proc);
+ dbitmap_shrink(&proc->dmap, new, nbits);
+ binder_proc_unlock(proc);
+}
+
/**
* binder_update_ref_for_handle() - inc/dec the ref for given handle
* @proc: proc containing the ref
@@ -1334,8 +1413,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
*rdata = ref->data;
binder_proc_unlock(proc);
- if (delete_ref)
+ if (delete_ref) {
binder_free_ref(ref);
+ try_shrink_dmap(proc);
+ }
return ret;
err_no_ref:
@@ -1468,7 +1549,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
* by threads that are being released. When done with the binder_proc,
* this function is called to decrement the counter and free the
* proc if appropriate (proc has been released, all threads have
- * been released and not currenly in-use to process a transaction).
+ * been released and not currently in use to process a transaction).
*/
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
@@ -1498,11 +1579,10 @@ static struct binder_thread *binder_get_txn_from(
{
struct binder_thread *from;
- spin_lock(&t->lock);
+ guard(spinlock)(&t->lock);
from = t->from;
if (from)
atomic_inc(&from->tmp_ref);
- spin_unlock(&t->lock);
return from;
}
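
The guard(spinlock)(&t->lock) conversion above (and the guard(mutex) one in binder_ioctl_set_ctx_mgr further down) uses the kernel's scope-based cleanup helpers, which are built on the compiler's cleanup attribute: the lock is released automatically on every return path. A userspace approximation with a pthread mutex, assuming GCC or Clang:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void unlocker(pthread_mutex_t **p)
{
	pthread_mutex_unlock(*p);
}

#define GUARD(mutex) \
	pthread_mutex_t *_guard __attribute__((cleanup(unlocker))) = \
		(pthread_mutex_lock(&(mutex)), &(mutex))

static int set_value(int *slot, int v)
{
	GUARD(m);		/* unlocked automatically on return */

	if (*slot)
		return -1;	/* no explicit unlock needed */
	*slot = v;
	return 0;
}

int main(void)
{
	int slot = 0;
	int a = set_value(&slot, 42);
	int b = set_value(&slot, 7);

	printf("first: %d, second: %d\n", a, b);
	return 0;
}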
@@ -1886,7 +1966,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -2329,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2349,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2905,6 +2985,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
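+
For completeness, a hypothetical libnl-3 listener for these reports might look like the sketch below. The generic netlink family and multicast group names ("binder" / "report") are assumptions — the real strings live in binder_netlink.h, which is not part of this hunk — and the attribute parsing is elided.

#include <netlink/netlink.h>
#include <netlink/handlers.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <stdio.h>

static int on_report(struct nl_msg *msg, void *arg)
{
	(void)msg; (void)arg;
	printf("binder transaction failure report received\n");
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk))
		return 1;

	/* assumed family/group names -- adjust to the real definitions */
	grp = genl_ctrl_resolve_grp(sk, "binder", "report");
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		return 1;

	nl_socket_disable_seq_check(sk);	/* multicast is unsolicited */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_report, NULL);

	for (;;)
		nl_recvmsgs_default(sk);
}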
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2932,8 +3075,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -2955,6 +3097,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3058,10 +3226,8 @@ static void binder_transaction(struct binder_proc *proc,
}
if (!target_node) {
binder_txn_error("%d:%d cannot find target node\n",
- thread->pid, proc->pid);
- /*
- * return_error is set above
- */
+ proc->pid, thread->pid);
+ /* return_error is set above */
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -3143,24 +3309,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3172,48 +3327,28 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3221,7 +3356,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3255,23 +3390,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3315,7 +3450,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3344,6 +3479,7 @@ static void binder_transaction(struct binder_proc *proc,
*/
copy_size = object_offset - user_offset;
if (copy_size && (user_offset > object_offset ||
+ object_offset > tr->data_size ||
binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer, user_offset,
@@ -3598,11 +3734,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3630,7 +3768,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3651,8 +3788,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3693,17 +3833,14 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3714,14 +3851,19 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -3763,16 +3905,164 @@ err_invalid_target_handle:
}
}
+static int
+binder_request_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ if (!freeze)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+ if (ref->freeze) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_stats_created(BINDER_STAT_FREEZE);
+ INIT_LIST_HEAD(&freeze->work.entry);
+ freeze->cookie = handle_cookie->cookie;
+ freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ ref->freeze = freeze;
+
+ if (ref->node->proc) {
+ binder_inner_proc_lock(ref->node->proc);
+ freeze->is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+ }
+
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_clear_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (!ref->freeze) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ freeze = ref->freeze;
+ binder_inner_proc_lock(proc);
+ if (freeze->cookie != handle_cookie->cookie) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)freeze->cookie,
+ (u64)handle_cookie->cookie);
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ ref->freeze = NULL;
+ /*
+ * Take the existing freeze object and overwrite its work type. There are three cases here:
+ * 1. No pending notification. In this case just add the work to the queue.
+ * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
+ * should resend with the new work type.
+ * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
+ * needs to be done here.
+ */
+ freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
+ if (list_empty(&freeze->work.entry)) {
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ } else if (freeze->sent) {
+ freeze->resend = true;
+ }
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_freeze_notification_done(struct binder_proc *proc,
+ struct binder_thread *thread,
+ binder_uintptr_t cookie)
+{
+ struct binder_ref_freeze *freeze = NULL;
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ struct binder_ref_freeze *tmp_freeze =
+ container_of(w, struct binder_ref_freeze, work);
+
+ if (tmp_freeze->cookie == cookie) {
+ freeze = tmp_freeze;
+ break;
+ }
+ }
+ if (!freeze) {
+ binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
+ return -EINVAL;
+ }
+ binder_dequeue_work_ilocked(&freeze->work);
+ freeze->sent = false;
+ if (freeze->resend) {
+ freeze->resend = false;
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ return 0;
+}
+
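+
The three cases described in binder_clear_freeze_notification() amount to a small per-notification state machine: idle, queued, or sent-and-awaiting-ack, with a resend flag for changes that race with an in-flight notification. A standalone sketch of those transitions; the field names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct freeze_work {
	bool queued;	/* on the todo list, not yet delivered */
	bool sent;	/* delivered, waiting for the _DONE ack */
	bool resend;	/* state changed while sent; redeliver on ack */
};

static void state_changed(struct freeze_work *w)
{
	if (!w->queued && !w->sent)
		w->queued = true;	/* case 1: just enqueue */
	else if (w->sent)
		w->resend = true;	/* case 2: redeliver after ack */
	/* case 3: already queued, nothing to do */
}

static void ack_received(struct freeze_work *w)
{
	w->sent = false;
	if (w->resend) {
		w->resend = false;
		w->queued = true;	/* re-enqueue with the new state */
	}
}

int main(void)
{
	struct freeze_work w = { .sent = true };

	state_changed(&w);		/* change arrives while sent */
	ack_received(&w);
	printf("queued=%d sent=%d resend=%d\n", w.queued, w.sent, w.resend);
	return 0;
}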
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Clean up the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
@@ -3991,20 +4281,21 @@ static int binder_thread_write(struct binder_proc *proc,
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
} else {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
}
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
- proc->pid, thread->pid, (u64)data_ptr,
+ "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid,
+ (unsigned long)data_ptr - proc->alloc.vm_start,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, thread, buffer, false);
@@ -4246,6 +4537,44 @@ static int binder_thread_write(struct binder_proc *proc,
binder_inner_proc_unlock(proc);
} break;
+ case BC_REQUEST_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_request_freeze_notification(proc, thread,
+ &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_FREEZE_NOTIFICATION_DONE: {
+ binder_uintptr_t cookie;
+ int error;
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(cookie);
+ error = binder_freeze_notification_done(proc, thread, cookie);
+ if (error)
+ return error;
+ } break;
+
default:
pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
@@ -4340,6 +4669,8 @@ static int binder_wait_for_work(struct binder_thread *thread,
*
* If we fail to allocate an fd, skip the install and release
* any fds that have already been allocated.
+ *
+ * Return: 0 on success, a negative errno code on failure.
*/
static int binder_apply_fd_fixups(struct binder_proc *proc,
struct binder_transaction *t)
@@ -4635,6 +4966,46 @@ retry:
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
+
+ case BINDER_WORK_FROZEN_BINDER: {
+ struct binder_ref_freeze *freeze;
+ struct binder_frozen_state_info info;
+
+ memset(&info, 0, sizeof(info));
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ info.is_frozen = freeze->is_frozen;
+ info.cookie = freeze->cookie;
+ freeze->sent = true;
+ binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
+ binder_inner_proc_unlock(proc);
+
+ if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &info, sizeof(info)))
+ return -EFAULT;
+ ptr += sizeof(info);
+ binder_stat_br(proc, thread, BR_FROZEN_BINDER);
+ goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
+ } break;
+
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze =
+ container_of(w, struct binder_ref_freeze, work);
+ binder_uintptr_t cookie = freeze->cookie;
+
+ binder_inner_proc_unlock(proc);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
+ } break;
+
default:
binder_inner_proc_unlock(proc);
pr_err("%d:%d: bad work type %d\n",
@@ -4743,16 +5114,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
- t->buffer->data_size, t->buffer->offsets_size,
- (u64)trd->data.ptr.buffer,
- (u64)trd->data.ptr.offsets);
+ t->buffer->data_size, t->buffer->offsets_size);
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4844,6 +5213,16 @@ static void binder_release_work(struct binder_proc *proc,
} break;
case BINDER_WORK_NODE:
break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze;
+
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered freeze notification, %016llx\n",
+ (u64)freeze->cookie);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ } break;
default:
pr_err("unexpected work type, %d, not freed\n",
wtype);
@@ -4924,6 +5303,7 @@ static void binder_free_proc(struct binder_proc *proc)
__func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(proc->context->name);
kfree(device);
}
@@ -4931,6 +5311,7 @@ static void binder_free_proc(struct binder_proc *proc)
put_task_struct(proc->tsk);
put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
+ dbitmap_free(&proc->dmap);
kfree(proc);
}
@@ -5065,10 +5446,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
+ return -EFAULT;
+
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
@@ -5083,8 +5463,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
goto out;
}
}
@@ -5098,22 +5476,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
- if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
+ if (ret < 0)
goto out;
- }
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
out:
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
return ret;
}
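
The rework above funnels every exit through the out label so the bwr write-back happens exactly once, instead of being duplicated before each early return. The general single-exit shape, in miniature:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int process(const char *in, char *out, size_t outlen)
{
	int ret = 0;

	if (!in || !out) {
		ret = -EINVAL;
		goto out;
	}
	if (strlen(in) >= outlen) {
		ret = -ENOSPC;
		goto out;
	}
	strcpy(out, in);
out:
	/* done once, on every path -- success or failure */
	fprintf(stderr, "process: ret=%d\n", ret);
	return ret;
}

int main(void)
{
	char buf[8];

	process("hello", buf, sizeof(buf));
	process("too long for buf", buf, sizeof(buf));
	return 0;
}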
@@ -5126,32 +5499,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct binder_node *new_node;
kuid_t curr_euid = current_euid();
- mutex_lock(&context->context_mgr_node_lock);
+ guard(mutex)(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
- goto out;
+ return ret;
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
- ret = -EPERM;
- goto out;
+ return -EPERM;
}
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, fbo);
- if (!new_node) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_node)
+ return -ENOMEM;
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
@@ -5160,8 +5529,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
binder_put_node(new_node);
-out:
- mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
@@ -5242,6 +5609,57 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
return false;
}
+static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+{
+ struct binder_node *prev = NULL;
+ struct rb_node *n;
+ struct binder_ref *ref;
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
+ struct binder_node *node;
+
+ node = rb_entry(n, struct binder_node, rb_node);
+ binder_inc_node_tmpref_ilocked(node);
+ binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
+ binder_node_lock(node);
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * freeze notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->freeze) {
+ binder_inner_proc_unlock(ref->proc);
+ continue;
+ }
+ ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ if (list_empty(&ref->freeze->work.entry)) {
+ ref->freeze->is_frozen = is_frozen;
+ binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ } else {
+ if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
+ ref->freeze->resend = true;
+ ref->freeze->is_frozen = is_frozen;
+ }
+ binder_inner_proc_unlock(ref->proc);
+ }
+ prev = node;
+ binder_node_unlock(node);
+ binder_inner_proc_lock(proc);
+ if (proc->is_dead)
+ break;
+ }
+ binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -5253,6 +5671,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
target_proc->async_recv = false;
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ binder_add_freeze_work(target_proc, false);
return 0;
}
@@ -5285,6 +5704,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
binder_inner_proc_lock(target_proc);
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ } else {
+ binder_add_freeze_work(target_proc, true);
}
return ret;
@@ -5343,11 +5764,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
- /*pr_info("binder_ioctl: %d:%d %x %lx\n",
- proc->pid, current->pid, cmd, arg);*/
-
- binder_selftest_alloc(&proc->alloc);
-
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5367,7 +5783,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
break;
case BINDER_SET_MAX_THREADS: {
- int max_threads;
+ u32 max_threads;
if (copy_from_user(&max_threads, ubuf,
sizeof(max_threads))) {
@@ -5583,10 +5999,11 @@ static void binder_vma_close(struct vm_area_struct *vma)
binder_alloc_vma_close(&proc->alloc);
}
-static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
+VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
@@ -5634,6 +6051,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+
+ dbitmap_init(&proc->dmap);
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
@@ -5658,6 +6077,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->delivered_freeze);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
@@ -5752,7 +6172,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
debugfs_remove(proc->debugfs_entry);
if (proc->binderfs_entry) {
- binderfs_remove_file(proc->binderfs_entry);
+ simple_recursive_removal(proc->binderfs_entry, NULL);
proc->binderfs_entry = NULL;
}
@@ -5904,6 +6324,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
+ binder_release_work(proc, &proc->delivered_freeze);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -5945,14 +6366,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
+ guard(mutex)(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
schedule_work(&binder_deferred_work);
}
- mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
@@ -5967,13 +6387,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -5994,14 +6414,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
- struct binder_proc *proc,
- const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w, bool hash_ptrs)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -6024,9 +6444,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
- prefix, node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
+ if (hash_ptrs)
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id,
+ (void *)(long)node->ptr,
+ (void *)(long)node->cookie);
+ else
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -6037,6 +6463,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
+ case BINDER_WORK_FROZEN_BINDER:
+ seq_printf(m, "%shas frozen binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
+ seq_printf(m, "%shas cleared freeze notification\n", prefix);
+ break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
@@ -6045,7 +6477,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
static void print_binder_thread_ilocked(struct seq_file *m,
struct binder_thread *thread,
- int print_always)
+ bool print_always, bool hash_ptrs)
{
struct binder_transaction *t;
struct binder_work *w;
@@ -6075,14 +6507,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work_ilocked(m, thread->proc, " ",
- " pending transaction", w);
+ " pending transaction",
+ w, hash_ptrs);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
- struct binder_node *node)
+ struct binder_node *node,
+ bool hash_ptrs)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -6090,8 +6524,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
count = hlist_count_nodes(&node->refs);
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
- node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ if (hash_ptrs)
+ seq_printf(m, " node %d: u%p c%p", node->debug_id,
+ (void *)(long)node->ptr, (void *)(long)node->cookie);
+ else
+ seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
+ seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
@@ -6104,7 +6543,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
if (node->proc) {
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, node->proc, " ",
- " pending async transaction", w);
+ " pending async transaction",
+ w, hash_ptrs);
}
}
@@ -6120,8 +6560,54 @@ static void print_binder_ref_olocked(struct seq_file *m,
binder_node_unlock(ref->node);
}
-static void print_binder_proc(struct seq_file *m,
- struct binder_proc *proc, int print_all)
+/**
+ * print_next_binder_node_ilocked() - Print binder_node from a locked list
+ * @m: struct seq_file for output via seq_printf()
+ * @proc: struct binder_proc whose inner_proc_lock we hold (if any)
+ * @node: struct binder_node to print fields of
+ * @prev_node: struct binder_node we hold a temporary reference to (if any)
+ * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
+ *
+ * Helper function to handle synchronization around printing a struct
+ * binder_node while iterating through @proc->nodes or the dead nodes list.
+ * Caller must hold either @proc->inner_lock (for live nodes) or
+ * binder_dead_nodes_lock. This lock will be released during the body of this
+ * function, but it will be reacquired before returning to the caller.
+ *
+ * Return: pointer to the struct binder_node we hold a tmpref on
+ */
+static struct binder_node *
+print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_node *prev_node, bool hash_ptrs)
+{
+ /*
+ * Take a temporary reference on the node so that it isn't freed while
+ * we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /*
+ * Live nodes need to drop the inner proc lock and dead nodes need to
+ * drop the binder_dead_nodes_lock before trying to take the node lock.
+ */
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ if (prev_node)
+ binder_put_node(prev_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node, hash_ptrs);
+ binder_node_inner_unlock(node);
+ if (proc)
+ binder_inner_proc_lock(proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ return node;
+}
+
+static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
+ bool print_all, bool hash_ptrs)
{
struct binder_work *w;
struct rb_node *n;
@@ -6134,31 +6620,19 @@ static void print_binder_proc(struct seq_file *m,
header_pos = m->count;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
- rb_node), print_all);
+ rb_node), print_all, hash_ptrs);
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (!print_all && !node->has_async_transaction)
continue;
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the tree
- * while we print it.
- */
- binder_inc_node_tmpref_ilocked(node);
- /* Need to drop inner lock to take node lock */
- binder_inner_proc_unlock(proc);
- if (last_node)
- binder_put_node(last_node);
- binder_node_inner_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_inner_unlock(node);
- last_node = node;
- binder_inner_proc_lock(proc);
+ last_node = print_next_binder_node_ilocked(m, proc, node,
+ last_node,
+ hash_ptrs);
}
binder_inner_proc_unlock(proc);
if (last_node)
@@ -6166,23 +6640,26 @@ static void print_binder_proc(struct seq_file *m,
if (print_all) {
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc);
- n != NULL;
- n = rb_next(n))
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
print_binder_ref_olocked(m, rb_entry(n,
- struct binder_ref,
- rb_node_desc));
+ struct binder_ref,
+ rb_node_desc));
binder_proc_unlock(proc);
}
binder_alloc_print_allocated(m, &proc->alloc);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
print_binder_work_ilocked(m, proc, " ",
- " pending transaction", w);
+ " pending transaction", w,
+ hash_ptrs);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ seq_puts(m, " has delivered freeze binder\n");
+ break;
+ }
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
@@ -6209,7 +6686,9 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
- "BR_TRANSACTION_PENDING_FROZEN"
+ "BR_TRANSACTION_PENDING_FROZEN",
+ "BR_FROZEN_BINDER",
+ "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_command_strings[] = {
@@ -6232,6 +6711,9 @@ static const char * const binder_command_strings[] = {
"BC_DEAD_BINDER_DONE",
"BC_TRANSACTION_SG",
"BC_REPLY_SG",
+ "BC_REQUEST_FREEZE_NOTIFICATION",
+ "BC_CLEAR_FREEZE_NOTIFICATION",
+ "BC_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_objstat_strings[] = {
@@ -6241,7 +6723,8 @@ static const char * const binder_objstat_strings[] = {
"ref",
"death",
"transaction",
- "transaction_complete"
+ "transaction_complete",
+ "freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
@@ -6301,7 +6784,7 @@ static void print_binder_proc_stats(struct seq_file *m,
count = 0;
ready_threads = 0;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
count++;
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
@@ -6315,7 +6798,7 @@ static void print_binder_proc_stats(struct seq_file *m,
ready_threads,
free_async_space);
count = 0;
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n))
count++;
binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
@@ -6323,7 +6806,7 @@ static void print_binder_proc_stats(struct seq_file *m,
strong = 0;
weak = 0;
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
@@ -6350,7 +6833,7 @@ static void print_binder_proc_stats(struct seq_file *m,
print_binder_stats(m, " ", &proc->stats);
}
-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -6361,31 +6844,40 @@ static int state_show(struct seq_file *m, void *unused)
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the list
- * while we print it.
- */
- node->tmp_refs++;
- spin_unlock(&binder_dead_nodes_lock);
- if (last_node)
- binder_put_node(last_node);
- binder_node_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_unlock(node);
- last_node = node;
- spin_lock(&binder_dead_nodes_lock);
- }
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+ last_node = print_next_binder_node_ilocked(m, NULL, node,
+ last_node,
+ hash_ptrs);
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 1);
+ print_binder_proc(m, proc, true, hash_ptrs);
mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+ struct binder_proc *proc;
+
+ seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, false, hash_ptrs);
+ mutex_unlock(&binder_procs_lock);
+}
+static int state_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, false);
+ return 0;
+}
+
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, true);
return 0;
}
@@ -6407,14 +6899,13 @@ static int stats_show(struct seq_file *m, void *unused)
static int transactions_show(struct seq_file *m, void *unused)
{
- struct binder_proc *proc;
-
- seq_puts(m, "binder transactions:\n");
- mutex_lock(&binder_procs_lock);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 0);
- mutex_unlock(&binder_procs_lock);
+ print_binder_transactions(m, false);
+ return 0;
+}
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_transactions(m, true);
return 0;
}
@@ -6423,14 +6914,13 @@ static int proc_show(struct seq_file *m, void *unused)
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- mutex_lock(&binder_procs_lock);
+ guard(mutex)(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, itr, 1);
+ print_binder_proc(m, itr, true, false);
}
}
- mutex_unlock(&binder_procs_lock);
return 0;
}
@@ -6494,8 +6984,10 @@ const struct file_operations binder_fops = {
};
DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6506,6 +6998,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "state_hashed",
+ .mode = 0444,
+ .fops = &state_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "stats",
.mode = 0444,
.fops = &stats_fops,
@@ -6518,6 +7016,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "transactions_hashed",
+ .mode = 0444,
+ .fops = &transactions_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "transaction_log",
.mode = 0444,
.fops = &transaction_log_fops,
@@ -6532,6 +7036,18 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ guard(spinlock)(&binder_devices_lock);
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+ guard(spinlock)(&binder_devices_lock);
+ hlist_del_init(&device->hlist);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6556,7 +7072,7 @@ static int __init init_binder_device(const char *name)
return ret;
}
- hlist_add_head(&binder_device->hlist, &binder_devices);
+ binder_add_device(binder_device);
return ret;
}
@@ -6609,16 +7125,23 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
- hlist_del(&device->hlist);
+ binder_remove_device(device);
kfree(device);
}
@@ -6635,5 +7158,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder/Makefile b/drivers/android/binder/Makefile
new file mode 100644
index 000000000000..09eabb527fa0
--- /dev/null
+++ b/drivers/android/binder/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
+rust_binder-y := \
+ rust_binder_main.o \
+ rust_binderfs.o \
+ rust_binder_events.o \
+ page_range_helper.o
diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
new file mode 100644
index 000000000000..7f65a9c3a0e5
--- /dev/null
+++ b/drivers/android/binder/allocation.rs
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
+
+use kernel::{
+ bindings,
+ fs::file::{File, FileDescriptorReservation},
+ prelude::*,
+ sync::{aref::ARef, Arc},
+ transmute::{AsBytes, FromBytes},
+ uaccess::UserSliceReader,
+ uapi,
+};
+
+use crate::{
+ deferred_close::DeferredFdCloser,
+ defs::*,
+ node::{Node, NodeRef},
+ process::Process,
+ DArc,
+};
+
+#[derive(Default)]
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Option<Range<usize>>,
+ /// The target node of the transaction this allocation is associated with.
+ /// Not set for replies.
+ pub(crate) target_node: Option<NodeRef>,
+ /// When this allocation is dropped, call `pending_oneway_finished` on the node.
+ ///
+ /// This is used to serialize oneway transactions on the same node. Binder guarantees that
+ /// oneway transactions to the same node are delivered sequentially in the order they are sent.
+ pub(crate) oneway_node: Option<DArc<Node>>,
+ /// Zero the data in the buffer on free.
+ pub(crate) clear_on_free: bool,
+ /// List of files embedded in this transaction.
+ file_list: FileList,
+}
+
+/// Represents an allocation that the kernel is currently using.
+///
+/// When allocations are idle, the range allocator holds the data related to them.
+///
+/// # Invariants
+///
+/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
+/// marked in use in the page range.
+pub(crate) struct Allocation {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pub(crate) process: Arc<Process>,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ pub(crate) oneway_spam_detected: bool,
+ #[allow(dead_code)]
+ pub(crate) debug_id: usize,
+}
+
+impl Allocation {
+ pub(crate) fn new(
+ process: Arc<Process>,
+ debug_id: usize,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ oneway_spam_detected: bool,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ debug_id,
+ oneway_spam_detected,
+ allocation_info: None,
+ free_on_drop: true,
+ }
+ }
+
+ fn size_check(&self, offset: usize, size: usize) -> Result {
+ let overflow_fail = offset.checked_add(size).is_none();
+ let cmp_size_fail = offset.wrapping_add(size) > self.size;
+ if overflow_fail || cmp_size_fail {
+ return Err(EFAULT);
+ }
+ Ok(())
+ }
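+
+ // Illustrative sketch (not part of this patch): `size_check` rejects both
+ // arithmetic overflow and ranges that run past the end of the allocation.
+ // For a hypothetical allocation with `size == 128`:
+ //
+ //     alloc.size_check(0, 128);        // Ok: exactly the whole buffer
+ //     alloc.size_check(64, 65);        // Err(EFAULT): 64 + 65 > 128
+ //     alloc.size_check(usize::MAX, 1); // Err(EFAULT): offset + size overflows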
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.size_check(offset, size)?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe {
+ self.process
+ .pages
+ .copy_from_user_slice(reader, self.offset + offset, size)
+ }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ self.size_check(offset, size_of::<T>())?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.read(self.offset + offset) }
+ }
+
+ pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ self.size_check(offset, size_of_val::<T>(obj))?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.write(self.offset + offset, obj) }
+ }
+
+ pub(crate) fn fill_zero(&self) -> Result {
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.fill_zero(self.offset, self.size) }
+ }
+
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+
+ pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
+ self.allocation_info.get_or_insert_with(Default::default)
+ }
+
+ pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+ self.get_or_init_info().offsets = Some(offsets);
+ }
+
+ pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
+ self.get_or_init_info().oneway_node = Some(oneway_node);
+ }
+
+ pub(crate) fn set_info_clear_on_drop(&mut self) {
+ self.get_or_init_info().clear_on_free = true;
+ }
+
+ pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
+ self.get_or_init_info().target_node = Some(target_node);
+ }
+
+ /// Reserve enough space to push at least `num_fds` fds.
+ pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
+ self.get_or_init_info()
+ .file_list
+ .files_to_translate
+ .reserve(num_fds, GFP_KERNEL)?;
+
+ Ok(())
+ }
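+
+ // Illustrative sketch (not part of this patch): reserving capacity up front
+ // means the later `info_add_fd` pushes during translation land in already
+ // reserved space. `num_fds`, `file`, and `offset` are hypothetical values.
+ //
+ //     alloc.info_add_fd_reserve(num_fds)?;
+ //     alloc.info_add_fd(file, offset, false)?;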
+
+ pub(crate) fn info_add_fd(
+ &mut self,
+ file: ARef<File>,
+ buffer_offset: usize,
+ close_on_free: bool,
+ ) -> Result {
+ self.get_or_init_info().file_list.files_to_translate.push(
+ FileEntry {
+ file,
+ buffer_offset,
+ close_on_free,
+ },
+ GFP_KERNEL,
+ )?;
+
+ Ok(())
+ }
+
+ pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
+ self.get_or_init_info().file_list.close_on_free = cof.0;
+ }
+
+ pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
+ let file_list = match self.allocation_info.as_mut() {
+ Some(info) => &mut info.file_list,
+ None => return Ok(TranslatedFds::new()),
+ };
+
+ let files = core::mem::take(&mut file_list.files_to_translate);
+
+ let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
+ let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
+
+ let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
+ for file_info in files {
+ let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
+ let fd = res.reserved_fd();
+ self.write::<u32>(file_info.buffer_offset, &fd)?;
+
+ reservations.push(
+ Reservation {
+ res,
+ file: file_info.file,
+ },
+ GFP_KERNEL,
+ )?;
+ if file_info.close_on_free {
+ close_on_free.push(fd, GFP_KERNEL)?;
+ }
+ }
+
+ Ok(TranslatedFds {
+ reservations,
+ close_on_free: FdsCloseOnFree(close_on_free),
+ })
+ }
+
+ /// Should the looper return to userspace when freeing this allocation?
+ pub(crate) fn looper_need_return_on_free(&self) -> bool {
+ // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
+ // we should return to userspace as soon as possible if we are closing fds.
+ match self.allocation_info {
+ Some(ref info) => !info.file_list.close_on_free.is_empty(),
+ None => false,
+ }
+ }
+}
+
+impl Drop for Allocation {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(mut info) = self.allocation_info.take() {
+ if let Some(oneway_node) = info.oneway_node.as_ref() {
+ oneway_node.pending_oneway_finished();
+ }
+
+ info.target_node = None;
+
+ if let Some(offsets) = info.offsets.clone() {
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ for &fd in &info.file_list.close_on_free {
+ let closer = match DeferredFdCloser::new(GFP_KERNEL) {
+ Ok(closer) => closer,
+ Err(kernel::alloc::AllocError) => {
+ // Ignore allocation failures.
+ break;
+ }
+ };
+
+ // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
+ // method is called from a kthread. However, this is always called from a syscall,
+ // so the latter case cannot happen, and we don't care about the first case.
+ let _ = closer.close_fd(fd);
+ }
+
+ if info.clear_on_free {
+ if let Err(e) = self.fill_zero() {
+ pr_warn!("Failed to clear data on free: {:?}", e);
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+/// A wrapper around `Allocation` that is being created.
+///
+/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
+/// considered to be part of a failed transaction. Successful transactions avoid that by calling
+/// `success`, which skips the destructor.
+#[repr(transparent)]
+pub(crate) struct NewAllocation(pub(crate) Allocation);
+
+impl NewAllocation {
+ pub(crate) fn success(self) -> Allocation {
+ // This skips the destructor.
+ //
+ // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+impl core::ops::Deref for NewAllocation {
+ type Target = Allocation;
+ fn deref(&self) -> &Allocation {
+ &self.0
+ }
+}
+
+impl core::ops::DerefMut for NewAllocation {
+ fn deref_mut(&mut self) -> &mut Allocation {
+ &mut self.0
+ }
+}
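+
+// Illustrative sketch (not part of this patch): a sender keeps the buffer by
+// calling `success` once the transaction is fully set up; simply dropping the
+// `NewAllocation` would instead run the failed-transaction cleanup.
+//
+//     let alloc = new_allocation.success(); // `new_allocation: NewAllocation`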
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+ pub(crate) alloc: &'a mut Allocation,
+ limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+ pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.copy_into(reader, offset, size)
+ }
+
+ pub(crate) fn transfer_binder_object(
+ &self,
+ offset: usize,
+ obj: &uapi::flat_binder_object,
+ strong: bool,
+ node_ref: NodeRef,
+ ) -> Result {
+ let mut newobj = FlatBinderObject::default();
+ let node = node_ref.node.clone();
+ if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node.get_id();
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.binder = ptr as _;
+ newobj.cookie = cookie as _;
+ self.write(offset, &newobj)?;
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node.update_refcount(true, 1, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .insert_or_update_handle(node_ref, false)?;
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.handle = handle;
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong);
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<BinderObjectHeader>(offset)?;
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder };
+ let cookie = obj.cookie;
+ self.alloc.process.update_node(ptr, cookie, strong);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle };
+ self.alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+ hdr: uapi::binder_object_header,
+ fbo: uapi::flat_binder_object,
+ fdo: uapi::binder_fd_object,
+ bbo: uapi::binder_buffer_object,
+ fdao: uapi::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+ Binder(&'a mut uapi::flat_binder_object),
+ Handle(&'a mut uapi::flat_binder_object),
+ Fd(&'a mut uapi::binder_fd_object),
+ Ptr(&'a mut uapi::binder_buffer_object),
+ Fda(&'a mut uapi::binder_fd_array_object),
+}
+
+impl BinderObject {
+ pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+ let object = Self::read_from_inner(|slice| {
+ let read_len = usize::min(slice.len(), reader.len());
+ reader.clone_reader().read_slice(&mut slice[..read_len])?;
+ Ok(())
+ })?;
+
+ // If we used an object type smaller than the largest object size, then we've read more
+ // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+ // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+ // right amount.
+ //
+ // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+ // happen if the type header corresponds to an object type that is larger than the rest of
+ // the reader.
+ //
+ // Any extra bytes beyond the size of the object are inaccessible after this call, so
+ // reading them again from the `reader` later does not result in TOCTOU bugs.
+ reader.skip(object.size())?;
+
+ Ok(object)
+ }
+
+ /// Use the provided reader closure to construct a `BinderObject`.
+ ///
+ /// The closure should write the bytes for the object into the provided slice.
+ pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+ where
+ R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+ {
+ let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+ // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+ // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+ // Additionally, `obj` was initialized to zeros, so the byte array will not be
+ // uninitialized.
+ (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+ if Self::type_to_size(type_).is_none() {
+ // The value of `obj.hdr.type_` was invalid.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+ // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+ unsafe { Ok(obj.assume_init()) }
+ }
+
+ pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+ use BinderObjectRef::*;
+ // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+ // variants of this union accept all initialized bit patterns.
+ unsafe {
+ match self.hdr.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+ BINDER_TYPE_FD => Fd(&mut self.fdo),
+ BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+ BINDER_TYPE_FDA => Fda(&mut self.fdao),
+ // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+ // other value than the ones checked above.
+ _ => core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ pub(crate) fn size(&self) -> usize {
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { self.hdr.type_ };
+
+ // SAFETY: The type invariants guarantee that the type field is correct.
+ unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+ }
+
+ fn type_to_size(type_: u32) -> Option<usize> {
+ match type_ {
+ BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
+ BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
+ BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
+ _ => None,
+ }
+ }
+}
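+
+// Illustrative sketch (not part of this patch): reading one object from a
+// `UserSliceReader` and dispatching on its type through the view; `reader` is
+// a hypothetical reader positioned at a serialized object.
+//
+//     let mut obj = BinderObject::read_from(&mut reader)?; // advances by obj.size()
+//     match obj.as_ref() {
+//         BinderObjectRef::Fd(fdo) => { /* translate the fd object */ }
+//         BinderObjectRef::Binder(fbo) => { /* resolve the node object */ }
+//         _ => { /* handle handles, pointers and fd arrays */ }
+//     }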
+
+#[derive(Default)]
+struct FileList {
+ files_to_translate: KVec<FileEntry>,
+ close_on_free: KVec<u32>,
+}
+
+struct FileEntry {
+ /// The file for which a descriptor will be created in the recipient process.
+ file: ARef<File>,
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+ /// Whether this fd should be closed when the allocation is freed.
+ close_on_free: bool,
+}
+
+pub(crate) struct TranslatedFds {
+ reservations: KVec<Reservation>,
+ /// If `commit` is called, these fds should be closed; if it is not, they should be left open.
+ close_on_free: FdsCloseOnFree,
+}
+
+struct Reservation {
+ res: FileDescriptorReservation,
+ file: ARef<File>,
+}
+
+impl TranslatedFds {
+ pub(crate) fn new() -> Self {
+ Self {
+ reservations: KVec::new(),
+ close_on_free: FdsCloseOnFree(KVec::new()),
+ }
+ }
+
+ pub(crate) fn commit(self) -> FdsCloseOnFree {
+ for entry in self.reservations {
+ entry.res.fd_install(entry.file);
+ }
+
+ self.close_on_free
+ }
+}
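+
+// Illustrative flow (not part of this patch): reservations are made while the
+// transaction is built, and the fds are only installed once it can no longer
+// fail; `alloc` is a hypothetical `Allocation` carrying fd entries.
+//
+//     let translated = alloc.translate_fds()?; // reserve fds, patch the buffer
+//     let cof = translated.commit();           // fd_install each reservation
+//     alloc.set_info_close_on_free(cof);       // remember fds to close on free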
+
+pub(crate) struct FdsCloseOnFree(KVec<u32>);
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
new file mode 100644
index 000000000000..3d135ec03ca7
--- /dev/null
+++ b/drivers/android/binder/context.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ error::Error,
+ list::{List, ListArc, ListLinks},
+ prelude::*,
+ security,
+ str::{CStr, CString},
+ sync::{Arc, Mutex},
+ task::Kuid,
+};
+
+use crate::{error::BinderError, node::NodeRef, process::Process};
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` in the module initializer, so it's initialized before first use.
+ pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
+ list: List::new(),
+ };
+}
+
+pub(crate) struct ContextList {
+ list: List<Context>,
+}
+
+pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+ let lock = CONTEXTS.lock();
+
+ let count = lock.list.iter().count();
+
+ let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for ctx in &lock.list {
+ ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ }
+ Ok(ctxs)
+}
+
+/// This struct keeps track of the processes using this context, and which process is the context
+/// manager.
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<Kuid>,
+ all_procs: List<Process>,
+}
+
+/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc.)
+#[pin_data]
+pub(crate) struct Context {
+ #[pin]
+ manager: Mutex<Manager>,
+ pub(crate) name: CString,
+ #[pin]
+ links: ListLinks,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Context { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Context {
+ using ListLinks { self.links };
+ }
+}
+
+impl Context {
+ pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
+ let name = CString::try_from(name)?;
+ let list_ctx = ListArc::pin_init::<Error>(
+ try_pin_init!(Context {
+ name,
+ links <- ListLinks::new(),
+ manager <- kernel::new_mutex!(Manager {
+ all_procs: List::new(),
+ node: None,
+ uid: None,
+ }, "Context::manager"),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let ctx = list_ctx.clone_arc();
+ CONTEXTS.lock().list.push_back(list_ctx);
+
+ Ok(ctx)
+ }
+
+ /// Called when the file for this context is unlinked.
+ ///
+ /// No-op if called twice.
+ pub(crate) fn deregister(&self) {
+ // SAFETY: We never add the context to any other linked list than this one, so it is either
+ // in this list, or not in any list.
+ unsafe { CONTEXTS.lock().list.remove(self) };
+ }
+
+ pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::register_process called on the wrong context.");
+ return;
+ }
+ self.manager.lock().all_procs.push_back(proc);
+ }
+
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::deregister_process called on the wrong context.");
+ return;
+ }
+ // SAFETY: We just checked that this is the right list.
+ unsafe { self.manager.lock().all_procs.remove(proc) };
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ pr_warn!("BINDER_SET_CONTEXT_MGR already set");
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // If the context manager has been set before, ensure that we use the same euid.
+ let caller_uid = Kuid::current_euid();
+ if let Some(ref uid) = manager.uid {
+ if *uid != caller_uid {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> Result<NodeRef, BinderError> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ .map_err(BinderError::from)
+ }
+
+ pub(crate) fn for_each_proc<F>(&self, mut func: F)
+ where
+ F: FnMut(&Process),
+ {
+ let lock = self.manager.lock();
+ for proc in &lock.all_procs {
+ func(&proc);
+ }
+ }
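+
+ // Illustrative sketch (not part of this patch): visit every process that
+ // currently uses this context, e.g. to log their pids.
+ //
+ //     ctx.for_each_proc(|proc| {
+ //         pr_info!("binder proc {}\n", proc.task.pid());
+ //     });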
+
+ pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let count = lock.all_procs.iter().count();
+
+ let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for proc in &lock.all_procs {
+ procs.push(Arc::from(proc), GFP_KERNEL)?;
+ }
+ Ok(procs)
+ }
+
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
+ let orig = self.get_all_procs()?;
+ let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
+ for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
+ backing.push(proc, GFP_KERNEL)?;
+ }
+ Ok(backing)
+ }
+}
diff --git a/drivers/android/binder/deferred_close.rs b/drivers/android/binder/deferred_close.rs
new file mode 100644
index 000000000000..ac895c04d0cb
--- /dev/null
+++ b/drivers/android/binder/deferred_close.rs
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Logic for closing files in a deferred manner.
+//!
+//! This file could make sense to have in `kernel::fs`, but it was rejected for being too
+//! Binder-specific.
+
+use core::mem::MaybeUninit;
+use kernel::{
+ alloc::{AllocError, Flags},
+ bindings,
+ prelude::*,
+};
+
+/// Helper used for closing file descriptors in a way that is safe even if the file is currently
+/// held using `fdget`.
+///
+/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
+/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
+pub(crate) struct DeferredFdCloser {
+ inner: KBox<DeferredFdCloserInner>,
+}
+
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Send for DeferredFdCloser {}
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+/// sharing it across threads.
+
+/// # Invariants
+///
+/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
+/// that file.
+#[repr(C)]
+struct DeferredFdCloserInner {
+ twork: MaybeUninit<bindings::callback_head>,
+ file: *mut bindings::file,
+}
+
+impl DeferredFdCloser {
+ /// Create a new [`DeferredFdCloser`].
+ pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
+ inner: KBox::new(
+ DeferredFdCloserInner {
+ twork: MaybeUninit::uninit(),
+ file: core::ptr::null_mut(),
+ },
+ flags,
+ )?,
+ })
+ }
+
+ /// Schedule a task work that closes the file descriptor when this task returns to userspace.
+ ///
+ /// Fails if this is called from a context where we cannot run work when returning to
+ /// userspace. (E.g., from a kthread.)
+ pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
+ use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;
+
+ // In this method, we schedule the task work before closing the file. This is because
+ // scheduling a task work is fallible, and we need to know whether it will fail before we
+ // attempt to close the file.
+
+ // Task works are not available on kthreads.
+ let current = kernel::current!();
+
+ // Check if this is a kthread.
+ // SAFETY: Reading `flags` from a task is always okay.
+ if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // Transfer ownership of the box's allocation to a raw pointer. This disables the
+ // destructor, so we must manually convert it back to a KBox to drop it.
+ //
+ // Until we convert it back to a `KBox`, there are no aliasing requirements on this
+ // pointer.
+ let inner = KBox::into_raw(self.inner);
+
+ // The `callback_head` field is first in the struct, so this cast correctly gives us a
+ // pointer to the field.
+ let callback_head = inner.cast::<bindings::callback_head>();
+ // SAFETY: This pointer offset operation does not go out-of-bounds.
+ let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };
+
+ let current = current.as_ptr();
+
+ // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
+ // it is okay for us to perform unsynchronized writes to its `callback_head` field.
+ unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };
+
+ // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
+ // task. If this operation is successful, then this transfers exclusive ownership of the
+ // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
+ // invalidate the field during that time.
+ //
+ // When the C side calls `do_close_fd`, the safety requirements of that method are
+ // satisfied because when a task work is executed, the callback is given ownership of the
+ // pointer.
+ //
+ // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
+ // is called, then that change happens due to the write at the end of this function, and
+ // that write has a safety comment that explains why the refcount can be dropped when
+ // `do_close_fd` runs.
+ let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };
+
+ if res != 0 {
+ // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
+ // we may destroy it.
+ unsafe { drop(KBox::from_raw(inner)) };
+
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // This removes the fd from the fd table in `current`. The file is not fully closed until
+ // `filp_close` is called. We are given ownership of one refcount to the file.
+ //
+ // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
+ // pointer is non-null), then we call `filp_close` on the returned pointer as required by
+ // `file_close_fd`.
+ let file = unsafe { bindings::file_close_fd(fd) };
+ if file.is_null() {
+ // We don't clean up the task work since that might be expensive if the task work queue
+            // is long. Just let it execute and let it clean up after itself.
+ return Err(DeferredFdCloseError::BadFd);
+ }
+
+ // Acquire a second refcount to the file.
+ //
+ // SAFETY: The `file` pointer points at a file with a non-zero refcount.
+ unsafe { bindings::get_file(file) };
+
+ // This method closes the fd, consuming one of our two refcounts. There could be active
+ // light refcounts created from that fd, so we must ensure that the file has a positive
+ // refcount for the duration of those active light refcounts. We do that by holding on to
+ // the second refcount until the current task returns to userspace.
+ //
+ // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
+ // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
+ // `current->files`.
+ //
+ // Note: fl_owner_t is currently a void pointer.
+ unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };
+
+ // We update the file pointer that the task work is supposed to fput. This transfers
+ // ownership of our last refcount.
+ //
+ // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
+ // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
+ // still own a refcount to the file, so we can pass ownership of that refcount to the
+ // `DeferredFdCloserInner`.
+ //
+ // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. However, this is
+ // the case because all light refcounts that are associated with the fd we closed
+        // previously must be dropped by the time `do_close_fd` runs, since light refcounts must be dropped
+ // before returning to userspace.
+ //
+ // SAFETY: Task works are executed on the current thread right before we return to
+ // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
+ // means that a race is not possible here.
+ unsafe { *file_field = file };
+
+ Ok(())
+ }
+
+ /// # Safety
+ ///
+ /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
+ /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
+ /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
+ unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
+ // SAFETY: The caller just passed us ownership of this box.
+ let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
+ if !inner.file.is_null() {
+ // SAFETY: By the type invariants, we own a refcount to this file, and the caller
+ // guarantees that dropping the refcount now is okay.
+ unsafe { bindings::fput(inner.file) };
+ }
+ // The allocation is freed when `inner` goes out of scope.
+ }
+}
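+
+// Illustrative usage (not part of this patch), from syscall context where task
+// work is available; `fd` is a hypothetical descriptor of the current task.
+//
+//     let closer = DeferredFdCloser::new(GFP_KERNEL)?;
+//     closer.close_fd(fd)?; // fails with TaskWorkUnavailable on kthreads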
+
+/// Represents a failure to close an fd in a deferred manner.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum DeferredFdCloseError {
+ /// Closing the fd failed because we were unable to schedule a task work.
+ TaskWorkUnavailable,
+ /// Closing the fd failed because the fd does not exist.
+ BadFd,
+}
+
+impl From<DeferredFdCloseError> for Error {
+ fn from(err: DeferredFdCloseError) -> Error {
+ match err {
+ DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
+ DeferredFdCloseError::BadFd => EBADF,
+ }
+ }
+}
diff --git a/drivers/android/binder/defs.rs b/drivers/android/binder/defs.rs
new file mode 100644
index 000000000000..33f51b4139c7
--- /dev/null
+++ b/drivers/android/binder/defs.rs
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ transmute::{AsBytes, FromBytes},
+ uapi::{self, *},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+ $(,)?) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
+
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_TRANSACTION,
+ BR_TRANSACTION_SEC_CTX,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_FAILED_REPLY,
+ BR_FROZEN_REPLY,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_TRANSACTION_COMPLETE,
+ BR_TRANSACTION_PENDING_FROZEN,
+ BR_ONEWAY_SPAM_SUSPECT,
+ BR_OK,
+ BR_ERROR,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FROZEN_BINDER,
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_TRANSACTION_SG,
+ BC_REPLY,
+ BC_REPLY_SG,
+ BC_FREE_BUFFER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REGISTER_LOOPER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE,
+ BC_REQUEST_FREEZE_NOTIFICATION,
+ BC_CLEAR_FREEZE_NOTIFICATION,
+ BC_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ flat_binder_object_flags_,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX
+);
+
+pub_no_prefix!(
+ transaction_flags_,
+ TF_ONE_WAY,
+ TF_ACCEPT_FDS,
+ TF_CLEAR_BUF,
+ TF_UPDATE_TXN
+);
+
+pub(crate) use uapi::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+ BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+        // Define a wrapper around the C type. Use `MaybeUninit` to ensure that the values of
+        // padding bytes are preserved.
+ #[derive(Copy, Clone)]
+ #[repr(transparent)]
+ pub(crate) struct $newname(MaybeUninit<$wrapped>);
+
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl FromBytes for $newname {}
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl AsBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_ref() }
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_mut() }
+ }
+ }
+
+ impl Default for $newname {
+ fn default() -> Self {
+ // Create a new value of this type where all bytes (including padding) are zeroed.
+ Self(MaybeUninit::zeroed())
+ }
+ }
+ };
+}
+
+decl_wrapper!(BinderNodeDebugInfo, uapi::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, uapi::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, uapi::flat_binder_object);
+decl_wrapper!(BinderFdObject, uapi::binder_fd_object);
+decl_wrapper!(BinderFdArrayObject, uapi::binder_fd_array_object);
+decl_wrapper!(BinderObjectHeader, uapi::binder_object_header);
+decl_wrapper!(BinderBufferObject, uapi::binder_buffer_object);
+decl_wrapper!(BinderTransactionData, uapi::binder_transaction_data);
+decl_wrapper!(
+ BinderTransactionDataSecctx,
+ uapi::binder_transaction_data_secctx
+);
+decl_wrapper!(BinderTransactionDataSg, uapi::binder_transaction_data_sg);
+decl_wrapper!(BinderWriteRead, uapi::binder_write_read);
+decl_wrapper!(BinderVersion, uapi::binder_version);
+decl_wrapper!(BinderFrozenStatusInfo, uapi::binder_frozen_status_info);
+decl_wrapper!(BinderFreezeInfo, uapi::binder_freeze_info);
+decl_wrapper!(BinderFrozenStateInfo, uapi::binder_frozen_state_info);
+decl_wrapper!(BinderHandleCookie, uapi::binder_handle_cookie);
+decl_wrapper!(ExtendedError, uapi::binder_extended_error);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(MaybeUninit::new(uapi::binder_version {
+ protocol_version: BINDER_CURRENT_PROTOCOL_VERSION as _,
+ }))
+ }
+}
+
+impl BinderTransactionData {
+ pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
+ BinderTransactionDataSg(MaybeUninit::new(uapi::binder_transaction_data_sg {
+ transaction_data: *self,
+ buffers_size,
+ }))
+ }
+}
+
+impl BinderTransactionDataSecctx {
+ /// View the inner data as wrapped in `BinderTransactionData`.
+ pub(crate) fn tr_data(&mut self) -> &mut BinderTransactionData {
+ // SAFETY: Transparent wrapper is safe to transmute.
+ unsafe {
+ &mut *(&mut self.transaction_data as *mut uapi::binder_transaction_data
+ as *mut BinderTransactionData)
+ }
+ }
+}
+
+impl ExtendedError {
+ pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
+ Self(MaybeUninit::new(uapi::binder_extended_error {
+ id,
+ command,
+ param,
+ }))
+ }
+}
diff --git a/drivers/android/binder/error.rs b/drivers/android/binder/error.rs
new file mode 100644
index 000000000000..b24497cfa292
--- /dev/null
+++ b/drivers/android/binder/error.rs
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::fmt;
+use kernel::prelude::*;
+
+use crate::defs::*;
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+/// An error that will be returned to userspace via the `BINDER_WRITE_READ` ioctl rather than via
+/// errno.
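+///
+/// A sketch of how the conversions compose (illustrative, not a doctest):
+///
+/// ```ignore
+/// fn fallible() -> BinderResult<()> {
+///     // `?` converts `Error` into `BinderError` via `From`: the result carries
+///     // reply = BR_FAILED_REPLY with the errno recorded as its `source`.
+///     Err(EINVAL)?
+/// }
+/// ```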
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+ source: Option<Error>,
+}
+
+impl BinderError {
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen() -> Self {
+ Self {
+ reply: BR_FROZEN_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen_oneway() -> Self {
+ Self {
+ reply: BR_TRANSACTION_PENDING_FROZEN,
+ source: None,
+ }
+ }
+
+ pub(crate) fn is_dead(&self) -> bool {
+ self.reply == BR_DEAD_REPLY
+ }
+
+ pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
+ self.source.unwrap_or(EINVAL).to_errno()
+ }
+
+ pub(crate) fn should_pr_warn(&self) -> bool {
+ self.source.is_some()
+ }
+}
+
+/// Convert an errno into a `BinderError` and store the errno used to construct it. The errno
+/// should be stored as the thread's extended error when given to userspace.
+impl From<Error> for BinderError {
+ fn from(source: Error) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(source),
+ }
+ }
+}
+
+impl From<kernel::fs::file::BadFdError> for BinderError {
+ fn from(source: kernel::fs::file::BadFdError) -> Self {
+ BinderError::from(Error::from(source))
+ }
+}
+
+impl From<kernel::alloc::AllocError> for BinderError {
+ fn from(_: kernel::alloc::AllocError) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(ENOMEM),
+ }
+ }
+}
+
+impl fmt::Debug for BinderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.reply {
+ BR_FAILED_REPLY => match self.source.as_ref() {
+ Some(source) => f
+ .debug_struct("BR_FAILED_REPLY")
+ .field("source", source)
+ .finish(),
+ None => f.pad("BR_FAILED_REPLY"),
+ },
+ BR_DEAD_REPLY => f.pad("BR_DEAD_REPLY"),
+ BR_FROZEN_REPLY => f.pad("BR_FROZEN_REPLY"),
+ BR_TRANSACTION_PENDING_FROZEN => f.pad("BR_TRANSACTION_PENDING_FROZEN"),
+ BR_TRANSACTION_COMPLETE => f.pad("BR_TRANSACTION_COMPLETE"),
+ _ => f
+ .debug_struct("BinderError")
+ .field("reply", &self.reply)
+ .finish(),
+ }
+ }
+}
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
new file mode 100644
index 000000000000..53b60035639a
--- /dev/null
+++ b/drivers/android/binder/freeze.rs
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ alloc::AllocError,
+ list::ListArc,
+ prelude::*,
+ rbtree::{self, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, UniqueArc},
+ uaccess::UserSliceReader,
+};
+
+use crate::{
+ defs::*, node::Node, process::Process, thread::Thread, BinderReturnWriter, DArc, DLArc,
+ DTRWrap, DeliverToRead,
+};
+
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct FreezeCookie(u64);
+
+/// Represents a listener for changes to the frozen state of a process.
+pub(crate) struct FreezeListener {
+ /// The node we are listening for.
+ pub(crate) node: DArc<Node>,
+ /// The cookie of this freeze listener.
+ cookie: FreezeCookie,
+ /// What value of `is_frozen` did we most recently tell userspace about?
+ last_is_frozen: Option<bool>,
+ /// We sent a `BR_FROZEN_BINDER` and we are waiting for `BC_FREEZE_NOTIFICATION_DONE` before
+ /// sending any other commands.
+ is_pending: bool,
+ /// Userspace sent `BC_CLEAR_FREEZE_NOTIFICATION` and we need to reply with
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` as soon as possible. If `is_pending` is set, then we
+ /// must wait for it to be unset before we can reply.
+ is_clearing: bool,
+ /// Number of cleared duplicates that can't be deleted until userspace sends
+ /// `BC_FREEZE_NOTIFICATION_DONE`.
+ num_pending_duplicates: u64,
+ /// Number of cleared duplicates that can be deleted.
+ num_cleared_duplicates: u64,
+}
+
+impl FreezeListener {
+ /// Is it okay to create a new listener with the same cookie as this one for the provided node?
+ ///
+ /// Under some scenarios, userspace may delete a freeze listener and immediately recreate it
+ /// with the same cookie. This results in duplicate listeners. To avoid issues with ambiguity,
+ /// we allow this only if the new listener is for the same node, and we also require that the
+ /// old listener has already been cleared.
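+    ///
+    /// An illustrative userspace sequence that exercises this path (hypothetical):
+    ///
+    /// ```text
+    /// BC_CLEAR_FREEZE_NOTIFICATION(handle, cookie)   // old listener: is_clearing = true
+    /// BC_REQUEST_FREEZE_NOTIFICATION(handle, cookie) // same node and cookie: allowed
+    /// ```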
+ fn allow_duplicate(&self, node: &DArc<Node>) -> bool {
+ Arc::ptr_eq(&self.node, node) && self.is_clearing
+ }
+}
+
+type UninitFM = UniqueArc<core::mem::MaybeUninit<DTRWrap<FreezeMessage>>>;
+
+/// Represents a notification that the freeze state has changed.
+pub(crate) struct FreezeMessage {
+ cookie: FreezeCookie,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for FreezeMessage {
+ untracked;
+ }
+}
+
+impl FreezeMessage {
+ fn new(flags: kernel::alloc::Flags) -> Result<UninitFM, AllocError> {
+ UniqueArc::new_uninit(flags)
+ }
+
+ fn init(ua: UninitFM, cookie: FreezeCookie) -> DLArc<FreezeMessage> {
+ match ua.pin_init_with(DTRWrap::new(FreezeMessage { cookie })) {
+ Ok(msg) => ListArc::from(msg),
+ Err(err) => match err {},
+ }
+ }
+}
+
+impl DeliverToRead for FreezeMessage {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let _removed_listener;
+ let mut node_refs = thread.process.node_refs.lock();
+ let Some(mut freeze_entry) = node_refs.freeze_listeners.find_mut(&self.cookie) else {
+ return Ok(true);
+ };
+ let freeze = freeze_entry.get_mut();
+
+ if freeze.num_cleared_duplicates > 0 {
+ freeze.num_cleared_duplicates -= 1;
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ return Ok(true);
+ }
+
+ if freeze.is_pending {
+ return Ok(true);
+ }
+ if freeze.is_clearing {
+ kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+ if freeze.num_pending_duplicates > 0 {
+ // The primary freeze listener was deleted, so convert a pending duplicate back
+ // into the primary one.
+ freeze.num_pending_duplicates -= 1;
+ freeze.is_pending = true;
+ freeze.is_clearing = true;
+ } else {
+ _removed_listener = freeze_entry.remove_node();
+ }
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ Ok(true)
+ } else {
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.last_is_frozen == Some(is_frozen) {
+ return Ok(true);
+ }
+
+ let mut state_info = BinderFrozenStateInfo::default();
+ state_info.is_frozen = is_frozen as u32;
+ state_info.cookie = freeze.cookie.0;
+ freeze.is_pending = true;
+ freeze.last_is_frozen = Some(is_frozen);
+ drop(node_refs);
+
+ writer.write_code(BR_FROZEN_BINDER)?;
+ writer.write_payload(&state_info)?;
+            // BR_FROZEN_BINDER notifications can cause transactions, so stop processing work
+            // items when we get to one.
+ Ok(false)
+ }
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}has frozen binder\n", prefix);
+ Ok(())
+ }
+}
+
+impl FreezeListener {
+ pub(crate) fn on_process_exit(&self, proc: &Arc<Process>) {
+ if !self.is_clearing {
+ self.node.remove_freeze_listener(proc);
+ }
+ }
+}
+
+impl Process {
+ pub(crate) fn request_freeze_notif(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ ) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let msg = FreezeMessage::new(GFP_KERNEL)?;
+ let alloc = RBTreeNodeReservation::new(GFP_KERNEL)?;
+
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ if info.freeze().is_some() {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION already set\n");
+ return Err(EINVAL);
+ }
+ let node_ref = info.node_ref();
+ let freeze_entry = node_refs.freeze_listeners.entry(cookie);
+
+ if let rbtree::Entry::Occupied(ref dupe) = freeze_entry {
+ if !dupe.get().allow_duplicate(&node_ref.node) {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION duplicate cookie\n");
+ return Err(EINVAL);
+ }
+ }
+
+ // All failure paths must come before this call, and all modifications must come after this
+ // call.
+ node_ref.node.add_freeze_listener(self, GFP_KERNEL)?;
+
+ match freeze_entry {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(
+ FreezeListener {
+ cookie,
+ node: node_ref.node.clone(),
+ last_is_frozen: None,
+ is_pending: false,
+ is_clearing: false,
+ num_pending_duplicates: 0,
+ num_cleared_duplicates: 0,
+ },
+ alloc,
+ );
+ }
+ rbtree::Entry::Occupied(mut dupe) => {
+ let dupe = dupe.get_mut();
+ if dupe.is_pending {
+ dupe.num_pending_duplicates += 1;
+ } else {
+ dupe.num_cleared_duplicates += 1;
+ }
+ dupe.last_is_frozen = None;
+ dupe.is_pending = false;
+ dupe.is_clearing = false;
+ }
+ }
+
+ *info.freeze() = Some(cookie);
+ let msg = FreezeMessage::init(msg, cookie);
+ drop(node_refs_guard);
+ let _ = self.push_work(msg);
+ Ok(())
+ }
+
+ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let cookie = FreezeCookie(reader.read()?);
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(freeze) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_FREEZE_NOTIFICATION_DONE {:016x} not found\n", cookie.0);
+ return Err(EINVAL);
+ };
+ let mut clear_msg = None;
+ if freeze.num_pending_duplicates > 0 {
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ freeze.num_pending_duplicates -= 1;
+ freeze.num_cleared_duplicates += 1;
+ } else {
+ if !freeze.is_pending {
+ pr_warn!(
+ "BC_FREEZE_NOTIFICATION_DONE {:016x} not pending\n",
+ cookie.0
+ );
+ return Err(EINVAL);
+ }
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+ // Immediately send another FreezeMessage.
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ freeze.is_pending = false;
+ }
+ drop(node_refs_guard);
+ if let Some(clear_msg) = clear_msg {
+ let _ = self.push_work(clear_msg);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_freeze_notif(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ let Some(info_cookie) = info.freeze() else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n");
+ return Err(EINVAL);
+ };
+ if *info_cookie != cookie {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch\n");
+ return Err(EINVAL);
+ }
+ let Some(listener) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid cookie {}\n", handle);
+ return Err(EINVAL);
+ };
+ listener.is_clearing = true;
+ listener.node.remove_freeze_listener(self);
+ *info.freeze() = None;
+ let mut msg = None;
+ if !listener.is_pending {
+ msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ drop(node_refs_guard);
+
+ if let Some(msg) = msg {
+ let _ = self.push_work(msg);
+ }
+ Ok(())
+ }
+
+ fn get_freeze_cookie(&self, node: &DArc<Node>) -> Option<FreezeCookie> {
+ let node_refs = &mut *self.node_refs.lock();
+ let handle = node_refs.by_node.get(&node.global_id())?;
+ let node_ref = node_refs.by_handle.get_mut(handle)?;
+ *node_ref.freeze()
+ }
+
+ /// Creates a vector of every freeze listener on this process.
+ ///
+ /// Returns pairs of the remote process listening for notifications and the local node it is
+ /// listening on.
+ #[expect(clippy::type_complexity)]
+ fn find_freeze_recipients(&self) -> Result<KVVec<(DArc<Node>, Arc<Process>)>, AllocError> {
+ // Defined before `inner` to drop after releasing spinlock if `push_within_capacity` fails.
+ let mut node_proc_pair;
+
+ // We pre-allocate space for up to 8 recipients before we take the spinlock. However, if
+ // the allocation fails, use a vector with a capacity of zero instead of failing. After
+ // all, there might not be any freeze listeners, in which case this operation could still
+ // succeed.
+ let mut recipients =
+ KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
+
+ let mut inner = self.lock_with_nodes();
+ let mut curr = inner.nodes.cursor_front_mut();
+ while let Some(cursor) = curr {
+ let (key, node) = cursor.current();
+ let key = *key;
+ let list = node.freeze_list(&inner.inner);
+ let len = list.len();
+
+ if recipients.spare_capacity_mut().len() < len {
+ drop(inner);
+ recipients.reserve(len, GFP_KERNEL)?;
+ inner = self.lock_with_nodes();
+ // Find the node we were looking at and try again. If the set of nodes was changed,
+ // then just proceed to the next node. This is ok because we don't guarantee the
+ // inclusion of nodes that are added or removed in parallel with this operation.
+ curr = inner.nodes.cursor_lower_bound_mut(&key);
+ continue;
+ }
+
+ for proc in list {
+ node_proc_pair = (node.clone(), proc.clone());
+ recipients
+ .push_within_capacity(node_proc_pair)
+ .map_err(|_| {
+ pr_err!(
+ "push_within_capacity failed even though we checked the capacity\n"
+ );
+ AllocError
+ })?;
+ }
+
+ curr = cursor.move_next();
+ }
+ Ok(recipients)
+ }
+
+ /// Prepare allocations for sending freeze messages.
+ pub(crate) fn prepare_freeze_messages(&self) -> Result<FreezeMessages, AllocError> {
+ let recipients = self.find_freeze_recipients()?;
+ let mut batch = KVVec::with_capacity(recipients.len(), GFP_KERNEL)?;
+ for (node, proc) in recipients {
+ let Some(cookie) = proc.get_freeze_cookie(&node) else {
+ // If the freeze listener was removed in the meantime, just discard the
+ // notification.
+ continue;
+ };
+ let msg_alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let msg = FreezeMessage::init(msg_alloc, cookie);
+ batch.push((proc, msg), GFP_KERNEL)?;
+ }
+
+ Ok(FreezeMessages { batch })
+ }
+}
+
+pub(crate) struct FreezeMessages {
+ batch: KVVec<(Arc<Process>, DLArc<FreezeMessage>)>,
+}
+
+impl FreezeMessages {
+ pub(crate) fn send_messages(self) {
+ for (proc, msg) in self.batch {
+ let _ = proc.push_work(msg);
+ }
+ }
+}
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
new file mode 100644
index 000000000000..c26d113ede96
--- /dev/null
+++ b/drivers/android/binder/node.rs
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::lock::{spinlock::SpinLockBackend, Guard},
+ sync::{Arc, LockedBy, SpinLock},
+};
+
+use crate::{
+ defs::*,
+ error::BinderError,
+ process::{NodeRefInfo, Process, ProcessInner},
+ thread::Thread,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+use core::mem;
+
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
+#[derive(Debug)]
+pub(crate) struct CouldNotDeliverCriticalIncrement;
+
+/// Keeps track of how this node is scheduled.
+///
+/// There are two ways to schedule a node to a work list: either schedule the node itself, or
+/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
+/// make it possible to "move" a node from one list to another: when `do_work` is called directly
+/// on the `Node`, it's a no-op if there's also a pending wrapper.
+///
+/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
+/// of this: weak increments and strong increments. We call such increments "critical" because it
+/// is critical that they are delivered to the thread doing the increment. Some examples:
+///
+/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
+/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
+/// thread doing the strong increment may have ended a long time ago when the command is actually
+/// processed by userspace.
+///
+/// * We have a weak reference and are about to drop it on one thread. But then another thread does
+/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
+/// about to drop the weak reference, then the strong increment could be processed after the
+/// other thread has already exited, which would be too late.
+///
+/// Note that trying to create a `ListArc` to the node can succeed even if `has_normal_push` is
+/// set. This is because another thread might just have popped the node from a todo list, but not
+/// yet called `do_work`. However, if `has_normal_push` is false, then creating a `ListArc` should
+/// always succeed.
+///
+/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
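+///
+/// An illustrative interleaving of the second example above (hypothetical):
+///
+/// ```text
+/// thread A: drops the last weak ref; the `Node` is pushed for BR_DECREFS
+/// thread B: does a zero-to-one strong increment; the `Node` is already pushed,
+///           so a wrapper is allocated and pushed instead, and BR_ACQUIRE is
+///           delivered even if thread A exits before processing its queue
+/// ```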
+struct DeliveryState {
+ /// Is the `Node` currently scheduled?
+ has_pushed_node: bool,
+
+ /// Is a wrapper currently scheduled?
+ ///
+ /// The wrapper is used only for strong zero2one increments.
+ has_pushed_wrapper: bool,
+
+ /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+ ///
+ /// Weak zero2one operations are always scheduled using the `Node`.
+ has_weak_zero2one: bool,
+
+ /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+ ///
+ /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+ /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
+ has_strong_zero2one: bool,
+}
+
+impl DeliveryState {
+ fn should_normal_push(&self) -> bool {
+ !self.has_pushed_node && !self.has_pushed_wrapper
+ }
+
+ fn did_normal_push(&mut self) {
+ assert!(self.should_normal_push());
+ self.has_pushed_node = true;
+ }
+
+ fn should_push_weak_zero2one(&self) -> bool {
+ !self.has_weak_zero2one && !self.has_strong_zero2one
+ }
+
+ fn can_push_weak_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_weak_zero2one(&mut self) {
+ assert!(self.should_push_weak_zero2one());
+ assert!(self.can_push_weak_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_weak_zero2one = true;
+ }
+
+ fn should_push_strong_zero2one(&self) -> bool {
+ !self.has_strong_zero2one
+ }
+
+ fn can_push_strong_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_strong_zero2one(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(self.can_push_strong_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_strong_zero2one = true;
+ }
+
+ fn did_push_strong_zero2one_wrapper(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(!self.can_push_strong_zero2one_normally());
+ self.has_pushed_wrapper = true;
+ self.has_strong_zero2one = true;
+ }
+}
+
+struct CountState {
+ /// The reference count.
+ count: usize,
+ /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
+ /// even if count is greater than one, we only increment it once in the owning process.)
+ has_count: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ }
+ }
+}
+
+struct NodeInner {
+ /// Strong refcounts held on this node by `NodeRef` objects.
+ strong: CountState,
+ /// Weak refcounts held on this node by `NodeRef` objects.
+ weak: CountState,
+ delivery_state: DeliveryState,
+ /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
+ /// that is, userspace will not be given the next one until it has finished processing the
+ /// previous oneway transaction. This is done to avoid the case where two oneway transactions
+ /// arrive in opposite order from the order in which they were sent. (E.g., they could be
+ /// delivered to two different threads, which could appear as-if they were sent in opposite
+ /// order.)
+ ///
+ /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
+ /// deliver the next oneway transaction until userspace signals that it has finished processing
+ /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
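+    ///
+    /// An illustrative sequence (hypothetical):
+    ///
+    /// ```text
+    /// oneway T1 arrives  -> delivered; has_oneway_transaction = true
+    /// oneway T2 arrives  -> queued on oneway_todo
+    /// BC_FREE_BUFFER(T1) -> pending_oneway_finished() pops and delivers T2
+    /// ```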
+ oneway_todo: List<DTRWrap<Transaction>>,
+ /// Keeps track of whether this node has a pending oneway transaction.
+ ///
+ /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
+ /// being delivered directly to the process.
+ has_oneway_transaction: bool,
+ /// List of processes to deliver a notification to when this node is destroyed (usually due to
+ /// the process dying).
+ death_list: List<DTRWrap<NodeDeath>, 1>,
+ /// List of processes to deliver freeze notifications to.
+ freeze_list: KVVec<Arc<Process>>,
+    /// The number of active BR_INCREFS or BR_ACQUIRE operations (at most two).
+    ///
+    /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
+    /// active operations have ended. This avoids a situation where an increment and a decrement
+    /// appear reordered from userspace's perspective.
+ active_inc_refs: u8,
+ /// List of `NodeRefInfo` objects that reference this node.
+ refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+}
+
+#[pin_data]
+pub(crate) struct Node {
+ pub(crate) debug_id: usize,
+ ptr: u64,
+ pub(crate) cookie: u64,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, ProcessInner>,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Node {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+// Make `oneway_todo` work.
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<Transaction> {
+ using ListLinks { self.links.inner };
+ }
+}
+
+impl Node {
+ pub(crate) fn new(
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ owner: Arc<Process>,
+ ) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner: LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ delivery_state: DeliveryState {
+ has_pushed_node: false,
+ has_pushed_wrapper: false,
+ has_weak_zero2one: false,
+ has_strong_zero2one: false,
+ },
+ death_list: List::new(),
+ oneway_todo: List::new(),
+ freeze_list: KVVec::new(),
+ has_oneway_transaction: false,
+ active_inc_refs: 0,
+ refs: List::new(),
+ },
+ ),
+ debug_id: super::next_debug_id(),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ links_track <- AtomicTracker::new(),
+ })
+ }
+
+ pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
+ let inner = self.inner.access_mut(owner_inner);
+ inner.has_oneway_transaction
+ }
+
+ #[inline(never)]
+ pub(crate) fn full_debug_print(
+ &self,
+ m: &SeqFile,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<()> {
+ let inner = self.inner.access_mut(owner_inner);
+ seq_print!(
+ m,
+ " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ inner.strong.has_count,
+ inner.weak.has_count,
+ inner.strong.count,
+ inner.weak.count,
+ );
+ if !inner.refs.is_empty() {
+ seq_print!(m, " proc");
+ for node_ref in &inner.refs {
+ seq_print!(m, " {}", node_ref.process.task.pid());
+ }
+ }
+ seq_print!(m, "\n");
+ for t in &inner.oneway_todo {
+ t.debug_print_inner(m, " pending async transaction ");
+ }
+ Ok(())
+ }
+
+ /// Insert the `NodeRef` into this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn insert_node_info(
+ &self,
+ info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+ ) {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .push_front(info);
+ }
+
+    /// Remove the `NodeRef` from this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn remove_node_info(
+ &self,
+ info: &NodeRefInfo,
+ ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
+ // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
+ // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
+ // the `refs` list of another node.
+ unsafe {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .remove(info)
+ }
+ }
+
+ /// An id that is unique across all binder nodes on the system. Used as the key in the
+ /// `by_node` map.
+ pub(crate) fn global_id(&self) -> usize {
+ self as *const Node as usize
+ }
+
+ pub(crate) fn get_id(&self) -> (u64, u64) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: ListArc<DTRWrap<NodeDeath>, 1>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn inc_ref_done_locked(
+ self: &DArc<Node>,
+ _strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let inner = self.inner.access_mut(owner_inner);
+ if inner.active_inc_refs == 0 {
+ pr_err!("inc_ref_done called when no active inc_refs");
+ return None;
+ }
+
+ inner.active_inc_refs -= 1;
+ if inner.active_inc_refs == 0 {
+ // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
+ // would send a refcount decrement, and if so, tell the caller to schedule us.
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ let should_drop_weak = !weak && has_weak;
+ let should_drop_strong = !strong && has_strong;
+
+ // If we want to drop the ref-count again, tell the caller to schedule a work node for
+ // that.
+ let need_push = should_drop_weak || should_drop_strong;
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn update_refcount_locked(
+ self: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ let need_push = if inc {
+ state.count += count;
+ // TODO: This method shouldn't be used for zero-to-one increments.
+ !is_dead && !state.has_count
+ } else {
+ if state.count < count {
+ pr_err!("Failure: refcount underflow!");
+ return None;
+ }
+ state.count -= count;
+ !is_dead && state.count == 0 && state.has_count
+ };
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one(
+ self: &DArc<Self>,
+ strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ state.count += 1;
+ if is_dead || state.has_count {
+ return Ok(None);
+ }
+
+ // Userspace needs to be notified of this.
+ if !strong && inner.delivery_state.should_push_weak_zero2one() {
+ assert!(inner.delivery_state.can_push_weak_zero2one_normally());
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_weak_zero2one();
+ Ok(Some(list_arc))
+ } else if strong && inner.delivery_state.should_push_strong_zero2one() {
+ if inner.delivery_state.can_push_strong_zero2one_normally() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_strong_zero2one();
+ Ok(Some(list_arc))
+ } else {
+ state.count -= 1;
+ Err(CouldNotDeliverCriticalIncrement)
+ }
+ } else {
+ // Work is already pushed, and we don't need to push again.
+ Ok(None)
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+ self: &DArc<Self>,
+ strong: bool,
+ wrapper: CritIncrWrapper,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<dyn DeliverToRead>> {
+ match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+ Ok(Some(node)) => Some(node as _),
+ Ok(None) => None,
+ Err(CouldNotDeliverCriticalIncrement) => {
+ assert!(strong);
+ let inner = self.inner.access_mut(owner_inner);
+ inner.strong.count += 1;
+ inner.delivery_state.did_push_strong_zero2one_wrapper();
+ Some(wrapper.init(self.clone()))
+ }
+ }
+ }
+
+ pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, count, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
+ writer.write_code(code)?;
+ writer.write_payload(&self.ptr)?;
+ writer.write_payload(&self.cookie)?;
+ Ok(())
+ }
+
+ pub(crate) fn submit_oneway(
+ &self,
+ transaction: DLArc<Transaction>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ if guard.is_dead {
+ return Err((BinderError::new_dead(), transaction));
+ }
+
+ let inner = self.inner.access_mut(guard);
+ if inner.has_oneway_transaction {
+ inner.oneway_todo.push_back(transaction);
+ } else {
+ inner.has_oneway_transaction = true;
+ guard.push_work(transaction)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn release(&self) {
+ let mut guard = self.owner.inner.lock();
+ while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
+ drop(guard);
+ work.into_arc().cancel();
+ guard = self.owner.inner.lock();
+ }
+
+ while let Some(death) = self.inner.access_mut(&mut guard).death_list.pop_front() {
+ drop(guard);
+ death.into_arc().set_dead();
+ guard = self.owner.inner.lock();
+ }
+ }
+
+ pub(crate) fn pending_oneway_finished(&self) {
+ let mut guard = self.owner.inner.lock();
+ if guard.is_dead {
+ // Cleanup will happen in `Process::deferred_release`.
+ return;
+ }
+
+ let inner = self.inner.access_mut(&mut guard);
+
+ let transaction = inner.oneway_todo.pop_front();
+ inner.has_oneway_transaction = transaction.is_some();
+ if let Some(transaction) = transaction {
+ match guard.push_work(transaction) {
+ Ok(()) => {}
+ Err((_err, work)) => {
+ // Process is dead.
+ // This shouldn't happen due to the `is_dead` check, but if it does, just drop
+ // the transaction and return.
+ drop(guard);
+ drop(work);
+ }
+ }
+ }
+ }
+
+ /// Finds an outdated transaction that the given transaction can replace.
+ ///
+ /// If one is found, it is removed from the list and returned.
+ pub(crate) fn take_outdated_transaction(
+ &self,
+ new: &Transaction,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Option<DLArc<Transaction>> {
+ let inner = self.inner.access_mut(guard);
+ let mut cursor = inner.oneway_todo.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if new.can_replace(&next) {
+ return Some(next.remove());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ /// This is split into a separate function since it's called by both `Node::do_work` and
+ /// `NodeWrapper::do_work`.
+ fn do_work_locked(
+ &self,
+ writer: &mut BinderReturnWriter<'_>,
+ mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<bool> {
+ let inner = self.inner.access_mut(&mut guard);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ if weak && !has_weak {
+ inner.weak.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ if strong && !has_strong {
+ inner.strong.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ let no_active_inc_refs = inner.active_inc_refs == 0;
+ let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
+ let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
+ if should_drop_weak {
+ inner.weak.has_count = false;
+ }
+ if should_drop_strong {
+ inner.strong.has_count = false;
+ }
+ if no_active_inc_refs && !weak {
+ // Remove the node if there are no references to it.
+ guard.remove_node(self.ptr);
+ }
+ drop(guard);
+
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+ if should_drop_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+ if should_drop_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ pub(crate) fn add_freeze_listener(
+ &self,
+ process: &Arc<Process>,
+ flags: kernel::alloc::Flags,
+ ) -> Result {
+ let mut vec_alloc = KVVec::<Arc<Process>>::new();
+ loop {
+ let mut guard = self.owner.inner.lock();
+            // Do not check `guard.is_dead`. The `dead` flag that matters here is that of the
+            // owner of the listener, not the target.
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ if len >= inner.freeze_list.capacity() {
+ if len >= vec_alloc.capacity() {
+ drop(guard);
+ vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
+ continue;
+ }
+ mem::swap(&mut inner.freeze_list, &mut vec_alloc);
+ for elem in vec_alloc.drain_all() {
+ inner.freeze_list.push_within_capacity(elem)?;
+ }
+ }
+ inner.freeze_list.push_within_capacity(process.clone())?;
+ return Ok(());
+ }
+ }
+
+ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
+ let _unused_capacity;
+ let mut guard = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
+ if len == inner.freeze_list.len() {
+ pr_warn!(
+ "Could not remove freeze listener for {}\n",
+ p.pid_in_current_ns()
+ );
+ }
+ if inner.freeze_list.is_empty() {
+ _unused_capacity = mem::take(&mut inner.freeze_list);
+ }
+ }
+
+ pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
+ &self.inner.access(guard).freeze_list
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+
+ assert!(inner.delivery_state.has_pushed_node);
+ if inner.delivery_state.has_pushed_wrapper {
+ // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+ // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+ // takes precedence over us.
+ assert!(inner.delivery_state.has_strong_zero2one);
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ return Ok(true);
+ }
+
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ inner.delivery_state.has_strong_zero2one = false;
+
+ self.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ );
+ Ok(())
+ }
+}
+
+/// Represents something that holds one or more ref-counts to a `Node`.
+///
+/// Whenever process A holds a refcount to a node owned by a different process B, then process A
+/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
+/// refcount, we destroy the NodeRef, which decrements the ref-count in process A.
+///
+/// This type is also used for some other cases. For example, a transaction allocation holds a
+/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
+/// so that the destructor of the allocation will drop a refcount on the `Node`.
+pub(crate) struct NodeRef {
+ pub(crate) node: DArc<Node>,
+ /// How many times does this NodeRef hold a refcount on the Node?
+ strong_node_count: usize,
+ weak_node_count: usize,
+ /// How many times does userspace hold a refcount on this NodeRef?
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_node_count: strong_count,
+ weak_node_count: weak_count,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ assert!(
+ Arc::ptr_eq(&self.node, &other.node),
+ "absorb called with differing nodes"
+ );
+ self.strong_node_count += other.strong_node_count;
+ self.weak_node_count += other.weak_node_count;
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ other.strong_node_count = 0;
+ other.weak_node_count = 0;
+
+ if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
+ let mut guard = self.node.owner.inner.lock();
+ let inner = self.node.inner.access_mut(&mut guard);
+
+ if self.strong_node_count >= 2 {
+ inner.strong.count -= self.strong_node_count - 1;
+ self.strong_node_count = 1;
+ assert_ne!(inner.strong.count, 0);
+ }
+ if self.weak_node_count >= 2 {
+ inner.weak.count -= self.weak_node_count - 1;
+ self.weak_node_count = 1;
+ assert_ne!(inner.weak.count, 0);
+ }
+ }
+ }
+
+ pub(crate) fn get_count(&self) -> (usize, usize) {
+ (self.strong_count, self.weak_count)
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(EINVAL);
+ }
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
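+    ///
+    /// A sketch of the last-decrement case (illustrative):
+    ///
+    /// ```text
+    /// strong_count: 1 -> 0  => node.update_refcount(false, strong_node_count, true)
+    /// weak_count is 0       => update() returns true; the caller removes this NodeRef
+    /// ```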
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+ let (count, node_count, other_count) = if strong {
+ (
+ &mut self.strong_count,
+ &mut self.strong_node_count,
+ self.weak_count,
+ )
+ } else {
+ (
+ &mut self.weak_count,
+ &mut self.weak_node_count,
+ self.strong_count,
+ )
+ };
+ if inc {
+ if *count == 0 {
+ *node_count = 1;
+ self.node.update_refcount(true, 1, strong);
+ }
+ *count += 1;
+ } else {
+ if *count == 0 {
+ pr_warn!(
+ "pid {} performed invalid decrement on ref\n",
+ kernel::current!().pid()
+ );
+ return false;
+ }
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, *node_count, strong);
+ *node_count = 0;
+ return other_count == 0;
+ }
+ }
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ // This destructor is called conditionally from `Allocation::drop`. That branch is often
+ // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if self.strong_node_count > 0 {
+ self.node
+ .update_refcount(false, self.strong_node_count, true);
+ }
+ if self.weak_node_count > 0 {
+ self.node
+ .update_refcount(false, self.weak_node_count, false);
+ }
+ }
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+    /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+    /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+    /// to the user).
+ aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
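+///
+/// An illustrative command/response sequence (hypothetical):
+///
+/// ```text
+/// userspace: BC_REQUEST_DEATH_NOTIFICATION(handle, cookie)
+///            ...the remote process dies...
+/// kernel:    BR_DEAD_BINDER(cookie)
+/// userspace: BC_DEAD_BINDER_DONE(cookie)
+/// userspace: BC_CLEAR_DEATH_NOTIFICATION(handle, cookie)
+/// kernel:    BR_CLEAR_DEATH_NOTIFICATION_DONE(cookie)
+/// ```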
+#[pin_data]
+pub(crate) struct NodeDeath {
+ node: DArc<Node>,
+ process: Arc<Process>,
+ pub(crate) cookie: u64,
+ #[pin]
+ links_track: AtomicTracker<0>,
+ /// Used by the owner `Node` to store a list of registered death notifications.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `death_list` list of `self.node`.
+ #[pin]
+ death_links: ListLinks<1>,
+ /// Used by the process to keep track of the death notifications for which we have sent a
+ /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `delivered_deaths` list of `self.process`.
+ #[pin]
+ delivered_links: ListLinks<2>,
+ #[pin]
+ delivered_links_track: AtomicTracker<2>,
+ #[pin]
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ pub(crate) fn new(
+ node: DArc<Node>,
+ process: Arc<Process>,
+ cookie: u64,
+ ) -> impl PinInit<DTRWrap<Self>> {
+ DTRWrap::new(pin_init!(
+ Self {
+ node,
+ process,
+ cookie,
+ links_track <- AtomicTracker::new(),
+ death_links <- ListLinks::new(),
+ delivered_links <- ListLinks::new(),
+ delivered_links_track <- AtomicTracker::new(),
+ inner <- kernel::new_spinlock!(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ }, "NodeDeath::inner"),
+ }
+ ))
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ if abort {
+ inner.aborted = true;
+ }
+ if inner.cleared {
+ // Already cleared.
+ return false;
+ }
+ inner.cleared = true;
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+ // its owner, so it is either in this death list or in no death list.
+ unsafe { node_inner.death_list.remove(self) };
+ }
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+ if needs_queueing {
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: DArc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let process = death.process.clone();
+ let _ = process.push_work(death);
+ }
+ }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeDeath {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<1> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.death_links };
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+ tracked_by wrapped: NodeDeath;
+ }
+}
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for NodeDeath {
+ tracked_by delivered_links_track: AtomicTracker<2>;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<2> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.delivered_links };
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write_code(cmd)?;
+ writer.write_payload(&cookie)?;
+ // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
+ // get to a death notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ let inner = self.inner.lock();
+
+ let dead_binder = inner.dead && !inner.notification_done;
+
+ if dead_binder {
+ if inner.cleared {
+ seq_print!(m, "{}has cleared dead binder\n", prefix);
+ } else {
+ seq_print!(m, "{}has dead binder\n", prefix);
+ }
+ } else {
+ seq_print!(m, "{}has cleared death notification\n", prefix);
+ }
+
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/node/wrapper.rs b/drivers/android/binder/node/wrapper.rs
new file mode 100644
index 000000000000..43294c050502
--- /dev/null
+++ b/drivers/android/binder/node/wrapper.rs
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{list::ListArc, prelude::*, seq_file::SeqFile, seq_print, sync::UniqueArc};
+
+use crate::{node::Node, thread::Thread, BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
+pub(crate) struct CritIncrWrapper {
+ inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+ pub(crate) fn new() -> Result<Self> {
+ Ok(CritIncrWrapper {
+ inner: UniqueArc::new_uninit(GFP_KERNEL)?,
+ })
+ }
+
+ pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+ match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+ Ok(initialized) => ListArc::from(initialized) as _,
+ Err(err) => match err {},
+ }
+ }
+}
+
+struct NodeWrapper {
+ node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeWrapper {
+ untracked;
+ }
+}
+
+impl DeliverToRead for NodeWrapper {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let node = &self.node;
+ let mut owner_inner = node.owner.inner.lock();
+ let inner = node.inner.access_mut(&mut owner_inner);
+
+ let ds = &mut inner.delivery_state;
+
+ assert!(ds.has_pushed_wrapper);
+ assert!(ds.has_strong_zero2one);
+ ds.has_pushed_wrapper = false;
+ ds.has_strong_zero2one = false;
+
+ node.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.node.debug_id,
+ self.node.ptr,
+ self.node.cookie,
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
new file mode 100644
index 000000000000..9379038f61f5
--- /dev/null
+++ b/drivers/android/binder/page_range.rs
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module has utilities for managing a page range where unused pages may be reclaimed by a
+//! vma shrinker.
+
+// To avoid deadlocks, locks are taken in the order:
+//
+// 1. mmap lock
+// 2. spinlock
+// 3. lru spinlock
+//
+// The shrinker uses trylock methods because it acquires the locks in a different order.
+
+use core::{
+ marker::PhantomPinned,
+ mem::{size_of, size_of_val, MaybeUninit},
+ ptr,
+};
+
+use kernel::{
+ bindings,
+ error::Result,
+ ffi::{c_ulong, c_void},
+ mm::{virt, Mm, MmWithUser},
+ new_mutex, new_spinlock,
+ page::{Page, PAGE_SHIFT, PAGE_SIZE},
+ prelude::*,
+ str::CStr,
+ sync::{aref::ARef, Mutex, SpinLock},
+ task::Pid,
+ transmute::FromBytes,
+ types::Opaque,
+ uaccess::UserSliceReader,
+};
+
+/// Represents a shrinker that can be registered with the kernel.
+///
+/// Each shrinker can be used by many `ShrinkablePageRange` objects.
+#[repr(C)]
+pub(crate) struct Shrinker {
+ inner: Opaque<*mut bindings::shrinker>,
+ list_lru: Opaque<bindings::list_lru>,
+}
+
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Send for Shrinker {}
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Sync for Shrinker {}
+
+impl Shrinker {
+ /// Create a new shrinker.
+ ///
+ /// # Safety
+ ///
+ /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
+ /// been called exactly once, and it must not have returned an error.
+ pub(crate) const unsafe fn new() -> Self {
+ Self {
+ inner: Opaque::uninit(),
+ list_lru: Opaque::uninit(),
+ }
+ }
+
+ /// Register this shrinker with the kernel.
+ pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
+ // SAFETY: These fields are not yet used, so it's okay to zero them.
+ unsafe {
+ self.inner.get().write(ptr::null_mut());
+ self.list_lru.get().write_bytes(0, 1);
+ }
+
+ // SAFETY: The field is not yet used, so we can initialize it.
+ let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+        // SAFETY: The `name` points at a valid C string.
+        let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
+        if shrinker.is_null() {
+            // SAFETY: We initialized it, so it's okay to destroy it.
+            unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
+            // `ret` is zero at this point, so report the failed allocation as ENOMEM.
+            return Err(ENOMEM);
+ }
+
+ // SAFETY: We're about to register the shrinker, and these are the fields we need to
+ // initialize. (All other fields are already zeroed.)
+ unsafe {
+ (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
+ (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
+ (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
+ }
+
+ // SAFETY: The new shrinker has been fully initialized, so we can register it.
+ unsafe { bindings::shrinker_register(shrinker) };
+
+ // SAFETY: This initializes the pointer to the shrinker so that we can use it.
+ unsafe { self.inner.get().write(shrinker) };
+
+ Ok(())
+ }
+}
+
+/// A container that manages a page range in a vma.
+///
+/// The pages can be thought of as an array of booleans indicating whether each page is usable.
+/// The methods `use_range` and `stop_using_range` set all booleans in a range to true or false
+/// respectively. Initially, no pages are allocated. When a page stops being used, it is not freed
+/// immediately. Instead, it is made available to the memory shrinker, which may free it if the
+/// system is under memory pressure.
+///
+/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
+/// way to know whether an index ends up with true or false if a call to `use_range` races with
+/// another call to `stop_using_range` on a given index.
+///
+/// It's also okay for the two methods to race with themselves: e.g., if two threads call
+/// `use_range` on the same index, neither call will return until the page is allocated and
+/// mapped.
+///
+/// The methods that read or write to a range require that the pages are marked as in use, so it
+/// is _not_ okay to call `stop_using_range` on a page while one of the reading or writing
+/// methods is still accessing it.
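+///
+/// A sketch of the intended call pattern, with illustrative page indices:
+///
+/// ```ignore
+/// // Ensure pages [2, 5) are allocated and mapped before touching them.
+/// range.use_range(2, 5)?;
+/// // ... read or write the pages while they are marked as in use ...
+/// // Hand the pages back to the shrinker so they may be reclaimed.
+/// range.stop_using_range(2, 5);
+/// ```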
+#[pin_data(PinnedDrop)]
+pub(crate) struct ShrinkablePageRange {
+ /// Shrinker object registered with the kernel.
+ shrinker: &'static Shrinker,
+ /// Pid using this page range. Only used as debugging information.
+ pid: Pid,
+ /// The mm for the relevant process.
+ mm: ARef<Mm>,
+ /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ #[pin]
+ mm_lock: Mutex<()>,
+ /// Spinlock protecting changes to pages.
+ #[pin]
+ lock: SpinLock<Inner>,
+
+ /// Must not move, since page info has pointers back.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+struct Inner {
+ /// Array of pages.
+ ///
+ /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
+ /// ownership. To deal with that, we manage it using raw pointers.
+ pages: *mut PageInfo,
+ /// Length of the `pages` array.
+ size: usize,
+ /// The address of the vma to insert the pages into.
+ vma_addr: usize,
+}
+
+// SAFETY: proper locking is in place for `Inner`
+unsafe impl Send for Inner {}
+
+type StableMmGuard =
+ kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
+
+/// An array element that describes the current state of a page.
+///
+/// There are three states:
+///
+/// * Free. The page is None. The `lru` element is not queued.
+/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
+/// * Used. The page is Some. The `lru` element is not queued.
+///
+/// When an element is available, the shrinker is able to free the page.
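+///
+/// In terms of transitions: `set_page` takes a slot from free to used, `list_lru_add` from used
+/// to available, `list_lru_del` from available back to used, and `take_page` empties the slot
+/// again (removal from the lru list happens separately).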
+#[repr(C)]
+struct PageInfo {
+ lru: bindings::list_head,
+ page: Option<Page>,
+ range: *const ShrinkablePageRange,
+}
+
+impl PageInfo {
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
+ unsafe fn set_page(me: *mut PageInfo, page: Page) {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for writing, so also valid for reading.
+ if unsafe { (*ptr).is_some() } {
+ pr_err!("set_page called when there is already a page");
+ // SAFETY: We will initialize the page again below.
+ unsafe { ptr::drop_in_place(ptr) };
+ }
+
+ // SAFETY: The pointer is valid for writing.
+ unsafe { ptr::write(ptr, Some(page)) };
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
+ unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw const (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).as_ref() }
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from and writing to `me.page` is ok.
+ unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for reading and writing.
+ unsafe { (*ptr).take() }
+ }
+
+ /// Add this page to the lru list, if not already in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+
+ /// Remove this page from the lru list, if it is in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+}
+
+impl ShrinkablePageRange {
+ /// Create a new `ShrinkablePageRange` using the given shrinker.
+ pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ shrinker,
+ pid: kernel::current!().pid(),
+ mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
+ mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
+ lock <- new_spinlock!(Inner {
+ pages: ptr::null_mut(),
+ size: 0,
+ vma_addr: 0,
+ }, "ShrinkablePageRange"),
+ _pin: PhantomPinned,
+ })
+ }
+
+ pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
+ // SAFETY: This extends the duration of the reference. Since this call happens before
+ // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
+ // until the returned guard is dropped. This ensures that the guard is valid until dropped.
+ let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };
+
+ mm_lock.try_lock()
+ }
+
+ /// Register a vma with this page range. Returns the number of pages in the region.
+ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
+ let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let num_pages = num_bytes >> PAGE_SHIFT;
+
+ if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
+ pr_debug!("Failed to register with vma: invalid vma->vm_mm");
+ return Err(EINVAL);
+ }
+ if num_pages == 0 {
+ pr_debug!("Failed to register with vma: size zero");
+ return Err(EINVAL);
+ }
+
+ let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;
+
+ // SAFETY: This just initializes the pages array.
+ unsafe {
+ let self_ptr = self as *const ShrinkablePageRange;
+ for i in 0..num_pages {
+ let info = pages.as_mut_ptr().add(i);
+ (&raw mut (*info).range).write(self_ptr);
+ (&raw mut (*info).page).write(None);
+ let lru = &raw mut (*info).lru;
+ (&raw mut (*lru).next).write(lru);
+ (&raw mut (*lru).prev).write(lru);
+ }
+ }
+
+ let mut inner = self.lock.lock();
+ if inner.size > 0 {
+ pr_debug!("Failed to register with vma: already registered");
+ drop(inner);
+ return Err(EBUSY);
+ }
+
+ inner.pages = pages.into_raw_parts().0;
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
+ Ok(num_pages)
+ }
+
+ /// Make sure that the given pages are allocated and mapped.
+ ///
+ /// Must not be called from an atomic context.
+ pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
+ if start >= end {
+ return Ok(());
+ }
+ let mut inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in start..end {
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // Since we're going to use the page, we should remove it from the lru list so that
+ // the shrinker will not free it.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ } else {
+ // We have to allocate a new page. Use the slow path.
+ drop(inner);
+ // SAFETY: `i < end <= inner.size` so `i` is in bounds.
+ match unsafe { self.use_page_slow(i) } {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("Error in use_page_slow: {:?}", err);
+ return Err(err);
+ }
+ }
+ inner = self.lock.lock();
+ }
+ }
+ Ok(())
+ }
+
+ /// Mark the given page as in use, slow path.
+ ///
+ /// Must not be called from an atomic context.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `i` is in bounds.
+ #[cold]
+ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
+ let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;
+
+ let mm_mutex = self.mm_lock.lock();
+ let inner = self.lock.lock();
+
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // The page was already there, or someone else added the page while we didn't hold the
+ // spinlock.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ return Ok(());
+ }
+
+ let vma_addr = inner.vma_addr;
+ // Release the spinlock while we insert the page into the vma.
+ drop(inner);
+
+ // No overflow since we stay in bounds of the vma.
+ let user_page_addr = vma_addr + (i << PAGE_SHIFT);
+
+ // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
+ // a remote process. If the call to `mmput` races with the process shutting down, then the
+ // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
+ // happen until it returns to userspace. However, the caller might instead go to sleep and
+ // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
+ // middle of a shutdown process that won't complete until the `mm` is dropped. This can
+ // amount to a deadlock.
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+ .mmap_read_lock()
+ .vma_lookup(vma_addr)
+ .ok_or(ESRCH)?
+ .as_mixedmap_vma()
+ .ok_or(ESRCH)?
+ .vm_insert_page(user_page_addr, &new_page)
+ .inspect_err(|err| {
+ pr_warn!(
+ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+ user_page_addr,
+ vma_addr,
+ i,
+ err
+ )
+ })?;
+
+ let inner = self.lock.lock();
+
+ // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
+ // can be written to since we hold the lock.
+ //
+ // We released and reacquired the spinlock since we checked that the page is null, but we
+ // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
+ // possible for someone else to have changed it since our check.
+ unsafe { PageInfo::set_page(page_info, new_page) };
+
+ drop(inner);
+ drop(mm_mutex);
+
+ Ok(())
+ }
+
+ /// If the given pages are in use, then mark them as available so that the shrinker can free them.
+ ///
+ /// May be called from an atomic context.
+ pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
+ if start >= end {
+ return;
+ }
+ let inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in (start..end).rev() {
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: Okay for reading since we have the lock.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // SAFETY: The pointer is valid, and it's the right shrinker.
+ unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
+ }
+ }
+ }
+
+ /// Helper for reading or writing to a range of bytes that may overlap with several pages.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
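+ ///
+ /// A sketch of a caller, mirroring how `fill_zero` below uses this helper
+ /// (the offsets are illustrative):
+ ///
+ /// ```ignore
+ /// // Zero ten bytes starting at byte 10, crossing page boundaries as needed.
+ /// unsafe { range.iterate(10, 10, |page, offset, len| page.fill_zero_raw(offset, len)) }?;
+ /// ```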
+ unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Page, usize, usize) -> Result,
+ {
+ if size == 0 {
+ return Ok(());
+ }
+
+ let (pages, num_pages) = {
+ let inner = self.lock.lock();
+ (inner.pages, inner.size)
+ };
+ let num_bytes = num_pages << PAGE_SHIFT;
+
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
+ return Err(EFAULT);
+ }
+
+ let mut page_index = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
+ while size > 0 {
+ let available = usize::min(size, PAGE_SIZE - offset);
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { pages.add(page_index) };
+ // SAFETY: The caller guarantees that this page is in the "in use" state for the
+ // duration of this call to `iterate`, so nobody will change the page.
+ let page = unsafe { PageInfo::get_page(page_info) };
+ if page.is_none() {
+ pr_warn!("Page is null!");
+ }
+ let page = page.ok_or(EFAULT)?;
+ cb(page, offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
+
+ /// Copy from userspace into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn copy_from_user_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_from_user_slice_raw(reader, offset, to_copy)
+ })
+ }
+ }
+
+ /// Copy from this page range into kernel space.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `read`.
+ unsafe {
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `out_offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
+ // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+ page.read_raw(obj_ptr, offset, to_copy)?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ }
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Copy from kernel space into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `write`.
+ unsafe {
+ self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
+ // SAFETY: The sum of `obj_offset` and `to_copy` is bounded by the size of `obj`.
+ let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ page.write_raw(obj_ptr, offset, to_copy)?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+ }
+
+ /// Write zeroes to the given range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, len| {
+ page.fill_zero_raw(offset, len)
+ })
+ }
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for ShrinkablePageRange {
+ fn drop(self: Pin<&mut Self>) {
+ let (pages, size) = {
+ let lock = self.lock.lock();
+ (lock.pages, lock.size)
+ };
+
+ if size == 0 {
+ return;
+ }
+
+ // Note: This call is also necessary for the safety of `stable_trylock_mm`.
+ let mm_lock = self.mm_lock.lock();
+
+ // This is the destructor, so unlike the other methods, we only need to worry about races
+ // with the shrinker here. Since we hold the `mm_lock`, we also can't race with the
+ // shrinker, and after this loop, the shrinker will not access any of our pages since we
+ // removed them from the lru list.
+ for i in 0..size {
+ // SAFETY: Loop is in-bounds of the size.
+ let p_ptr = unsafe { pages.add(i) };
+ // SAFETY: No other readers, so we can read.
+ if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
+ // SAFETY: The pointer is valid and it's the right shrinker.
+ unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
+ }
+ }
+
+ drop(mm_lock);
+
+ // SAFETY: `pages` was allocated as an `KVVec<PageInfo>` with capacity `size`. Furthermore,
+ // all `size` elements are initialized. Also, the array is no longer shared with the
+ // shrinker due to the above loop.
+ drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
+ }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_count(
+ shrink: *mut bindings::shrinker,
+ _sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe { bindings::list_lru_count(list_lru) }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_scan(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Caller guarantees that it is safe to read this field.
+ let nr_to_scan = unsafe { (*sc).nr_to_scan };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe {
+ bindings::list_lru_walk(
+ list_lru,
+ Some(bindings::rust_shrink_free_page_wrap),
+ ptr::null_mut(),
+ nr_to_scan,
+ )
+ }
+}
+
+const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
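+// Note: `rust_shrink_free_page` temporarily drops the lru lock, so the walk
+// must be told to retry; hence the mapping to the C `LRU_REMOVED_RETRY` status.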
+const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_free_page(
+ item: *mut bindings::list_head,
+ lru: *mut bindings::list_lru_one,
+ _cb_arg: *mut c_void,
+) -> bindings::lru_status {
+ // Fields that should survive after unlocking the lru lock.
+ let page;
+ let page_index;
+ let mm;
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+ let range = unsafe { &*((*info).range) };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+ None => return LRU_SKIP,
+ };
+
+ mm_mutex = match range.stable_trylock_mm() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ mmap_read = match mm.mmap_read_trylock() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ // We can't lock it normally here, since we hold the lru lock.
+ let inner = match range.lock.try_lock() {
+ Some(inner) => inner,
+ None => return LRU_SKIP,
+ };
+
+ // SAFETY: The item is in this lru list, so it's okay to remove it.
+ unsafe { bindings::list_lru_isolate(lru, item) };
+
+ // SAFETY: Both pointers are in bounds of the same allocation.
+ page_index = unsafe { info.offset_from(inner.pages) } as usize;
+
+ // SAFETY: We hold the spinlock, so we can take the page.
+ //
+ // This sets the page pointer to zero before we unmap it from the vma. However, we call
+ // `zap_page_range` before we release the mmap lock, so `use_page_slow` will not be able to
+ // insert a new page until after our call to `zap_page_range`.
+ page = unsafe { PageInfo::take_page(info) };
+ vma_addr = inner.vma_addr;
+
+ // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
+ // they can be freed at any point after we unlock `lru_lock`. This is with the exception of
+ // `mm_mutex` which is kept alive by holding the lock.
+ }
+
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+ if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
+
+ drop(mmap_read);
+ drop(mm_mutex);
+ drop(mm);
+ drop(page);
+
+ // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
+ unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
+
+ LRU_REMOVED_ENTRY
+}
diff --git a/drivers/android/binder/page_range_helper.c b/drivers/android/binder/page_range_helper.c
new file mode 100644
index 000000000000..496887723ee0
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* C helper for page_range.rs to work around a CFI violation.
+ *
+ * Bindgen currently pretends that `enum lru_status` is the same as an integer.
+ * This assumption is fine ABI-wise, but once you add CFI to the mix, it
+ * triggers a CFI violation because `enum lru_status` gets a different CFI tag.
+ *
+ * This file contains a workaround until bindgen can be fixed.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+#include "page_range_helper.h"
+
+unsigned int rust_shrink_free_page(struct list_head *item,
+ struct list_lru_one *list,
+ void *cb_arg);
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg)
+{
+ return rust_shrink_free_page(item, list, cb_arg);
+}
diff --git a/drivers/android/binder/page_range_helper.h b/drivers/android/binder/page_range_helper.h
new file mode 100644
index 000000000000..18dd2dd117b2
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_PAGE_RANGE_HELPER_H
+#define _LINUX_PAGE_RANGE_HELPER_H
+
+#include <linux/list_lru.h>
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg);
+
+#endif /* _LINUX_PAGE_RANGE_HELPER_H */
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
new file mode 100644
index 000000000000..132055b4790f
--- /dev/null
+++ b/drivers/android/binder/process.rs
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Process` type, which represents a process using a particular binder
+//! context.
+//!
+//! The `Process` object keeps track of all of the resources that this process owns in the binder
+//! context.
+//!
+//! There is one `Process` object for each binder fd that a process has opened, so processes using
+//! several binder contexts have several `Process` objects. This ensures that the contexts are
+//! fully separated.
+
+use core::mem::take;
+
+use kernel::{
+ bindings,
+ cred::Credential,
+ error::Error,
+ fs::file::{self, File},
+ id_pool::IdPool,
+ list::{List, ListArc, ListArcField, ListLinks},
+ mm,
+ prelude::*,
+ rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::{
+ lock::{spinlock::SpinLockBackend, Guard},
+ Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+ },
+ task::Task,
+ types::ARef,
+ uaccess::{UserSlice, UserSliceReader},
+ uapi,
+ workqueue::{self, Work},
+};
+
+use crate::{
+ allocation::{Allocation, AllocationInfo, NewAllocation},
+ context::Context,
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
+ page_range::ShrinkablePageRange,
+ range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
+ stats::BinderStats,
+ thread::{PushWorkRes, Thread},
+ BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[path = "freeze.rs"]
+mod freeze;
+use self::freeze::{FreezeCookie, FreezeListener};
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize) -> Self {
+ Self {
+ address,
+ alloc: RangeAllocator::new(size),
+ }
+ }
+}
+
+// bitflags for defer_work.
+const PROC_DEFER_FLUSH: u8 = 1;
+const PROC_DEFER_RELEASE: u8 = 2;
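+// These flags are read and cleared in one go by the `workqueue::WorkItem`
+// implementation for `Process` below, which then runs the matching
+// `deferred_flush`/`deferred_release` handlers.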
+
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+ Yes,
+ No,
+ InProgress,
+}
+
+impl IsFrozen {
+ /// Whether incoming transactions should be rejected due to freeze.
+ pub(crate) fn is_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => true,
+ }
+ }
+
+ /// Whether freeze notifications consider this process frozen.
+ pub(crate) fn is_fully_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => false,
+ }
+ }
+}
+
+/// The fields of `Process` protected by the spinlock.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ pub(crate) is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ /// INVARIANT: Threads pushed to this list must be owned by this process.
+ ready_threads: List<Thread>,
+ nodes: RBTree<u64, DArc<Node>>,
+ mapping: Option<Mapping>,
+ work: List<DTRWrap<dyn DeliverToRead>>,
+ delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+ /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+
+ /// Bitmap of deferred work to do.
+ defer_work: u8,
+
+ /// Number of transactions to be transmitted before processes in freeze_wait
+ /// are woken up.
+ outstanding_txns: u32,
+ /// Process is frozen and unable to service binder transactions.
+ pub(crate) is_frozen: IsFrozen,
+ /// Process received sync transactions since last frozen.
+ pub(crate) sync_recv: bool,
+ /// Process received async transactions since last frozen.
+ pub(crate) async_recv: bool,
+ pub(crate) binderfs_file: Option<BinderfsProcFile>,
+ /// Check for oneway spam
+ oneway_spam_detection_enabled: bool,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ work: List::new(),
+ delivered_deaths: List::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ defer_work: 0,
+ outstanding_txns: 0,
+ is_frozen: IsFrozen::No,
+ sync_recv: false,
+ async_recv: false,
+ binderfs_file: None,
+ oneway_spam_detection_enabled: false,
+ }
+ }
+
+ /// Schedule the work item for execution on this process.
+ ///
+ /// If any threads are ready for work, then the work item is given directly to that thread and
+ /// it is woken up. Otherwise, it is pushed to the process work list.
+ ///
+ /// This call can fail only if the process is dead. In this case, the work item is returned to
+ /// the caller so that the caller can drop it after releasing the inner process lock. This is
+ /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
+ /// taken while holding the inner process lock.
+ pub(crate) fn push_work(
+ &mut self,
+ work: DLArc<dyn DeliverToRead>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ match thread.push_work(work) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
+ }
+ } else if self.is_dead {
+ Err((BinderError::new_dead(), work))
+ } else {
+ let sync = work.should_sync_wakeup();
+
+ // Didn't find a thread waiting for proc work; this can happen
+ // in two scenarios:
+ // 1. All threads are busy handling transactions
+ // In that case, one of those threads should call back into
+ // the kernel driver soon and pick up this work.
+ // 2. Threads are using the (e)poll interface, in which case
+ // they may be blocked on the waitqueue without having been
+ // added to waiting_threads. For this case, we just iterate
+ // over all threads not handling transaction work, and
+ // wake them all up. We wake all because we don't know whether
+ // a thread that called into (e)poll is handling non-binder
+ // work currently.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready(sync);
+ }
+
+ Ok(())
+ }
+ }
+
+ pub(crate) fn remove_node(&mut self, ptr: u64) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, count, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if let Some(node) = push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node);
+ } else {
+ let _ = self.push_work(node);
+ // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
+ // that case, it doesn't care about the notification.
+ }
+ }
+ }
+
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, 1, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ pub(crate) fn new_node_ref_with_thread(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
+ let push = match wrapper {
+ None => node
+ .incr_refcount_allow_zero2one(strong, self)?
+ .map(|node| node as _),
+ Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+ };
+ if let Some(node) = push {
+ thread.push_work_deferred(node);
+ }
+ let strong_count = if strong { 1 } else { 0 };
+ Ok(NodeRef::new(node, strong_count, 1 - strong_count))
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+ /// Finds a delivered death notification with the given cookie, removes it from the process's
+ /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if next.cookie == cookie {
+ return Some(next.remove().into_arc());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ self.delivered_deaths.push_back(death);
+ } else {
+ pr_warn!("Notification added to `delivered_deaths` twice.");
+ }
+ }
+
+ pub(crate) fn add_outstanding_txn(&mut self) {
+ self.outstanding_txns += 1;
+ }
+
+ fn txns_pending_locked(&self) -> bool {
+ if self.outstanding_txns > 0 {
+ return true;
+ }
+ for thread in self.threads.values() {
+ if thread.has_current_transaction() {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Used to keep track of a node that this process has a handle to.
+#[pin_data]
+pub(crate) struct NodeRefInfo {
+ debug_id: usize,
+ /// The refcount that this process owns to the node.
+ node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+ death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
+ /// Cookie of the active freeze listener for this node.
+ freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
+ /// Used to store this `NodeRefInfo` in the node's `refs` list.
+ #[pin]
+ links: ListLinks<{ Self::LIST_NODE }>,
+ /// The handle for this `NodeRefInfo`.
+ handle: u32,
+ /// The process that has a handle to the node.
+ pub(crate) process: Arc<Process>,
+}
+
+impl NodeRefInfo {
+ /// The id used for the `Node::refs` list.
+ pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
+ /// The id used for the `ListArc` in `ProcessNodeRefs`.
+ const LIST_PROC: u64 = 0xd703a5263dcc8650;
+
+ fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ debug_id: super::next_debug_id(),
+ node_ref: ListArcField::new(node_ref),
+ death: ListArcField::new(None),
+ freeze: ListArcField::new(None),
+ links <- ListLinks::new(),
+ handle,
+ process,
+ })
+ }
+
+ kernel::list::define_list_arc_field_getter! {
+ pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
+ pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
+ pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
+ pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
+ impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
+ using ListLinks { self.links };
+ }
+}
+
+/// Keeps track of references this process has to nodes owned by other processes.
+///
+/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
+/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
+/// extra costs should be eliminated.
+struct ProcessNodeRefs {
+ /// Used to look up nodes using the 32-bit id that this process knows it by.
+ by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_is_present: IdPool,
+ /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
+ /// the underlying `Node` struct as returned by `Node::global_id`.
+ by_node: RBTree<usize, u32>,
+ /// Used to look up a `FreezeListener` by cookie.
+ ///
+ /// There might be multiple freeze listeners for the same node, but at most one of them is
+ /// active.
+ freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ handle_is_present: IdPool::new(),
+ by_node: RBTree::new(),
+ freeze_listeners: RBTree::new(),
+ }
+ }
+}
+
+/// A process using binder.
+///
+/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
+/// that a process has opened, so processes using several binder contexts have several `Process`
+/// objects. This ensures that the contexts are fully separated.
+#[pin_data]
+pub(crate) struct Process {
+ pub(crate) ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ #[pin]
+ pub(crate) inner: SpinLock<ProcessInner>,
+
+ #[pin]
+ pub(crate) pages: ShrinkablePageRange,
+
+ // Waitqueue of processes waiting for all outstanding transactions to be
+ // processed.
+ #[pin]
+ freeze_wait: CondVar,
+
+ // Node references are in a different lock to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ #[pin]
+ node_refs: Mutex<ProcessNodeRefs>,
+
+ // Work node for deferred work item.
+ #[pin]
+ defer_work: Work<Process>,
+
+ // Links for process list in Context.
+ #[pin]
+ links: ListLinks,
+
+ pub(crate) stats: BinderStats,
+}
+
+kernel::impl_has_work! {
+ impl HasWork<Process> for Process { self.defer_work }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Process { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Process {
+ using ListLinks { self.links };
+ }
+}
+
+impl workqueue::WorkItem for Process {
+ type Pointer = Arc<Process>;
+
+ fn run(me: Arc<Self>) {
+ let defer;
+ {
+ let mut inner = me.inner.lock();
+ defer = inner.defer_work;
+ inner.defer_work = 0;
+ }
+
+ if defer & PROC_DEFER_FLUSH != 0 {
+ me.deferred_flush();
+ }
+ if defer & PROC_DEFER_RELEASE != 0 {
+ me.deferred_release();
+ }
+ }
+}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let current = kernel::current!();
+ let list_process = ListArc::pin_init::<Error>(
+ try_pin_init!(Process {
+ ctx,
+ cred,
+ inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
+ pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
+ task: current.group_leader().into(),
+ defer_work <- kernel::new_work!("Process::defer_work"),
+ links <- ListLinks::new(),
+ stats: BinderStats::new(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let process = list_process.clone_arc();
+ process.ctx.register_process(list_process);
+
+ Ok(process)
+ }
+
+ pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
+ self.task.tgid_nr_ns(None)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let inner = self.inner.lock();
+ seq_print!(m, " threads: {}\n", inner.threads.iter().count());
+ seq_print!(
+ m,
+ " requested threads: {}+{}/{}\n",
+ inner.requested_thread_count,
+ inner.started_thread_count,
+ inner.max_threads,
+ );
+ if let Some(mapping) = &inner.mapping {
+ seq_print!(
+ m,
+ " free oneway space: {}\n",
+ mapping.alloc.free_oneway_space()
+ );
+ seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
+ }
+ seq_print!(
+ m,
+ " outstanding transactions: {}\n",
+ inner.outstanding_txns
+ );
+ seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
+ drop(inner);
+
+ {
+ let mut refs = self.node_refs.lock();
+ let (mut count, mut weak, mut strong) = (0, 0, 0);
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let (nstrong, nweak) = node_ref.get_count();
+ count += 1;
+ weak += nweak;
+ strong += nstrong;
+ }
+ seq_print!(m, " refs: {count} s {strong} w {weak}\n");
+ }
+
+ self.stats.debug_print(" ", m);
+
+ Ok(())
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let mut all_threads = KVec::new();
+ let mut all_nodes = KVec::new();
+ loop {
+ let inner = self.inner.lock();
+ let num_threads = inner.threads.iter().count();
+ let num_nodes = inner.nodes.iter().count();
+
+ if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
+ drop(inner);
+ all_threads.reserve(num_threads, GFP_KERNEL)?;
+ all_nodes.reserve(num_nodes, GFP_KERNEL)?;
+ continue;
+ }
+
+ for thread in inner.threads.values() {
+ assert!(all_threads.len() < all_threads.capacity());
+ let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
+ }
+
+ for node in inner.nodes.values() {
+ assert!(all_nodes.len() < all_nodes.capacity());
+ let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
+ }
+
+ break;
+ }
+
+ for thread in all_threads {
+ thread.debug_print(m, print_all)?;
+ }
+
+ let mut inner = self.inner.lock();
+ for node in all_nodes {
+ if print_all || node.has_oneway_transaction(&mut inner) {
+ node.full_debug_print(m, &mut inner)?;
+ }
+ }
+ drop(inner);
+
+ if print_all {
+ let mut refs = self.node_refs.lock();
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let dead = node_ref.node.owner.inner.lock().is_dead;
+ let (strong, weak) = node_ref.get_count();
+ let debug_id = node_ref.node.debug_id;
+
+ seq_print!(
+ m,
+ " ref {}: desc {} {}node {debug_id} s {strong} w {weak}\n",
+ r.debug_id,
+ r.handle,
+ if dead { "dead " } else { "" }
+ );
+ }
+ }
+
+ let inner = self.inner.lock();
+ for work in &inner.work {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ for _death in &inner.delivered_deaths {
+ seq_print!(m, " has delivered dead binder\n");
+ }
+ if let Some(mapping) = &inner.mapping {
+ mapping.alloc.debug_print(m)?;
+ }
+ drop(inner);
+
+ Ok(())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> GetWorkOrRegister<'a> {
+ let mut inner = self.inner.lock();
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return GetWorkOrRegister::Work(work);
+ }
+
+ // Register the thread as ready.
+ GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
+ }
+
+ fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
+ let id = {
+ let current = kernel::current!();
+ if !core::ptr::eq(current.group_leader(), &*self.task) {
+ pr_err!("get_current_thread was called from the wrong process.");
+ return Err(EINVAL);
+ }
+ current.pid()
+ };
+
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let ta: Arc<Thread> = Thread::new(id, self.into())?;
+
+ let mut inner = self.inner.lock();
+ match inner.threads.entry(id) {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(ta.clone(), reservation);
+ Ok(ta)
+ }
+ rbtree::Entry::Occupied(_entry) => {
+ pr_err!("Cannot create two threads with the same id.");
+ Err(EINVAL)
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ // If push_work fails, drop the work item outside the lock.
+ let res = self.inner.lock().push_work(work);
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ fn get_node_inner(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
+ let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+
+ inner.nodes.insert(rbnode);
+ // This can only fail if someone has already pushed the node to a list, but we just created
+ // it and still hold the lock, so it can't fail right now.
+ let node_ref = inner
+ .new_node_ref_with_thread(node, strong, thread, wrapper)
+ .unwrap();
+
+ Ok(Ok(node_ref))
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ ) -> Result<NodeRef> {
+ let mut wrapper = None;
+ for _ in 0..2 {
+ match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+ Err(err) => return Err(err),
+ Ok(Ok(node_ref)) => return Ok(node_ref),
+ Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+ wrapper = Some(CritIncrWrapper::new()?);
+ }
+ }
+ }
+ // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+ // loop should run at most twice.
+ unreachable!()
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ self: ArcBorrow<'_, Process>,
+ node_ref: NodeRef,
+ is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let info = UniqueArc::new_uninit(GFP_KERNEL)?;
+
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (unused_id, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+ match refs.by_handle.entry(res.as_u32()) {
+ rbtree::Entry::Vacant(entry) => break (res, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_is_present and by_handle");
+ res.acquire();
+ kernel::warn_on!(true);
+ return Err(EINVAL);
+ }
+ }
+ }
+
+ let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let resizer = grow_request.realloc(GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ refs.handle_is_present.grow(resizer);
+ };
+ let handle = unused_id.as_u32();
+
+ // Do a lookup again, as the node may have been inserted while the lock was released.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+
+ let gid = node_ref.node.global_id();
+ let (info_proc, info_node) = {
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
+ match info.pin_init_with(info_init) {
+ Ok(info) => ListArc::pair_from_pin_unique(info),
+ // error is infallible
+ Err(err) => match err {},
+ }
+ };
+
+ // Ensure the process is still alive while we insert a new reference.
+ //
+ // This releases the lock before inserting the nodes, but since `is_dead` is set as the
+ // first thing in `deferred_release`, process cleanup will not miss the items inserted into
+ // `refs` below.
+ if self.inner.lock().is_dead {
+ return Err(ESRCH);
+ }
+
+ // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
+ // `info_node` into the right node's `refs` list.
+ unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
+
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ by_handle_slot.insert(info_proc, reserve2);
+ unused_id.acquire();
+ Ok(handle)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ Ok(self.ctx.get_manager_node(true)?)
+ } else {
+ Ok(self.get_node_from_handle(handle, true)?)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get_mut(&handle)
+ .ok_or(ENOENT)?
+ .node_ref()
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(
+ self: ArcBorrow<'_, Process>,
+ handle: u32,
+ inc: bool,
+ strong: bool,
+ ) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(&*self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref().update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death().take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables, and from the node's `refs` list.
+
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ let id = info.node_ref().node.global_id();
+ refs.by_handle.remove(&handle);
+ refs.by_node.remove(&id);
+ refs.handle_is_present.release_id(handle as usize);
+
+ if let Some(shrink) = refs.handle_is_present.shrink_request() {
+ drop(refs);
+ // This intentionally ignores allocation failures.
+ if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+ refs = self.node_refs.lock();
+ refs.handle_is_present.shrink(new_bitmap);
+ }
+ }
+ }
+ } else {
+ // All refs are cleared in process exit, so this warning is expected in that case.
+ if !self.inner.lock().is_dead {
+ pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, 1, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
+ let ptr = reader.read::<u64>()?;
+ let cookie = reader.read::<u64>()?;
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
+ // This only fails if the process is dead.
+ let _ = inner.push_work(node);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(
+ self: &Arc<Self>,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ from_pid: i32,
+ ) -> BinderResult<NewAllocation> {
+ use kernel::page::PAGE_SIZE;
+
+ let mut reserve_new_args = ReserveNewArgs {
+ debug_id,
+ size,
+ is_oneway,
+ pid: from_pid,
+ ..ReserveNewArgs::default()
+ };
+
+ let (new_alloc, addr) = loop {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+ let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
+ ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
+ ReserveNew::NeedAlloc(request) => request,
+ };
+ drop(inner);
+ // We need to allocate memory and then call `reserve_new` again.
+ reserve_new_args = alloc_request.make_alloc()?;
+ };
+
+ let res = Allocation::new(
+ self.clone(),
+ debug_id,
+ new_alloc.offset,
+ size,
+ addr + new_alloc.offset,
+ new_alloc.oneway_spam_detected,
+ );
+
+ // This allocation will be marked as in use until the `Allocation` is used to free it.
+ //
+ // This method can't be called while holding a lock, so we release the lock first. It's
+ // okay for several threads to use the method on the same index at the same time. In that
+ // case, one of the calls will allocate the given page (if missing), and the other call
+ // will wait for the other call to finish allocating the page.
+ //
+ // We will not call `stop_using_range` in parallel with this on the same page, because the
+ // allocation can only be removed via the destructor of the `Allocation` object that we
+ // currently own.
+ match self.pages.use_range(
+ new_alloc.offset / PAGE_SIZE,
+ (new_alloc.offset + size).div_ceil(PAGE_SIZE),
+ ) {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("use_range failure {:?}", err);
+ return Err(err.into());
+ }
+ }
+
+ Ok(NewAllocation(res))
+ }
+
+ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ let offset = match ptr.checked_sub(mapping.address) {
+ Some(offset) => offset,
+ None => return,
+ };
+
+ let freed_range = match mapping.alloc.reservation_abort(offset) {
+ Ok(freed_range) => freed_range,
+ Err(_) => {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ return;
+ }
+ };
+
+ // No more allocations in this range. Mark them as not in use.
+ //
+ // Must be done before we release the lock so that `use_range` is not used on these
+ // indices until `stop_using_range` returns.
+ self.pages
+ .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
+ use kernel::page::PAGE_SIZE;
+ let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let mapping = Mapping::new(vma.start(), size);
+ let page_count = self.pages.register_with_vma(vma)?;
+ if page_count * PAGE_SIZE != size {
+ return Err(EINVAL);
+ }
+
+ // Save range allocator for later.
+ self.inner.lock().mapping = Some(mapping);
+
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlice) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
+ fn get_node_debug_info(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ {
+ let mut node_refs = self.node_refs.lock();
+ let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
+ let node_ref = node_info.node_ref();
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ }
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
+ thread.push_return_work(BR_ERROR);
+ })?;
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death().is_some() {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
+ return Ok(());
+ }
+
+ let death = {
+ let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+ match death.pin_init_with(death_init) {
+ Ok(death) => death,
+ // The error type is infallible, so this branch is unreachable.
+ Err(err) => match err {},
+ }
+ };
+
+ // Register the death notification.
+ {
+ let owner = info.node_ref2().node.owner.clone();
+ let mut owner_inner = owner.inner.lock();
+ if owner_inner.is_dead {
+ let death = Arc::from(death);
+ *info.death() = Some(death.clone());
+ drop(owner_inner);
+ death.set_dead();
+ } else {
+ let death = ListArc::from(death);
+ *info.death() = Some(death.clone_arc());
+ info.node_ref().node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ let Some(death) = info.death().take() else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
+ return Ok(());
+ };
+ if death.cookie != cookie {
+ *info.death() = Some(death);
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
+ return Ok(());
+ }
+
+ // Update the state and determine whether we need to queue a work item. We only need to
+ // queue one when the node is not dead, or when the user has already completed the death
+ // notification.
+ if death.set_cleared(false) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+
+ /// Locks the spinlock and moves the `nodes` rbtree out.
+ ///
+ /// This allows you to iterate through `nodes` while also handing exclusive access to the
+ /// rest of `ProcessInner` to other parts of the codebase.
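+ ///
+ /// A minimal usage sketch (hypothetical caller code, not part of this patch):
+ ///
+ /// ```ignore
+ /// let mut guard = process.lock_with_nodes();
+ /// for (ptr, node) in guard.nodes.iter() {
+ ///     // `guard.inner` (the locked `ProcessInner`) stays usable here.
+ ///     let _ = (ptr, node);
+ /// }
+ /// // Dropping `guard` moves the rbtree back into `ProcessInner`.
+ /// ```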
+ pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
+ let mut inner = self.inner.lock();
+ WithNodes {
+ nodes: take(&mut inner.nodes),
+ inner,
+ }
+ }
+
+ fn deferred_flush(&self) {
+ let inner = self.inner.lock();
+ for thread in inner.threads.values() {
+ thread.exit_looper();
+ }
+ }
+
+ fn deferred_release(self: Arc<Self>) {
+ let is_manager = {
+ let mut inner = self.inner.lock();
+ inner.is_dead = true;
+ inner.is_frozen = IsFrozen::No;
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_manager
+ };
+
+ if is_manager {
+ self.ctx.unset_manager_node();
+ }
+
+ self.ctx.deregister_process(&self);
+
+ let binderfs_file = self.inner.lock().binderfs_file.take();
+ drop(binderfs_file);
+
+ // Release threads.
+ let threads = {
+ let mut inner = self.inner.lock();
+ let threads = take(&mut inner.threads);
+ let ready = take(&mut inner.ready_threads);
+ drop(inner);
+ drop(ready);
+
+ for thread in threads.values() {
+ thread.release();
+ }
+ threads
+ };
+
+ // Release nodes.
+ {
+ while let Some(node) = {
+ let mut lock = self.inner.lock();
+ lock.nodes.cursor_front_mut().map(|c| c.remove_current().1)
+ } {
+ node.to_key_value().1.release();
+ }
+ }
+
+ // Clean up death listeners and remove nodes from external node info lists.
+ for info in self.node_refs.lock().by_handle.values_mut() {
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ let death = if let Some(existing) = info.death().take() {
+ existing
+ } else {
+ continue;
+ };
+ death.set_cleared(false);
+ }
+
+ // Clean up freeze listeners.
+ let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
+ for listener in freeze_listeners.values() {
+ listener.on_process_exit(&self);
+ }
+ drop(freeze_listeners);
+
+ // Release refs on foreign nodes.
+ {
+ let mut refs = self.node_refs.lock();
+ let by_handle = take(&mut refs.by_handle);
+ let by_node = take(&mut refs.by_node);
+ drop(refs);
+ drop(by_node);
+ drop(by_handle);
+ }
+
+ // Cancel all pending work items.
+ while let Some(work) = self.get_work() {
+ work.into_arc().cancel();
+ }
+
+ // Clear the `delivered_deaths` list.
+ //
+ // The inner scope ensures that the lock guard is dropped before the loop body runs.
+ while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } {
+ drop(delivered_death);
+ }
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = self.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ mapping
+ .alloc
+ .take_for_each(|offset, size, debug_id, odata| {
+ let ptr = offset + address;
+ let mut alloc =
+ Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+ // Calls to synchronize_rcu() in the `Thread` destructor will happen here.
+ drop(threads);
+ }
+
+ pub(crate) fn drop_outstanding_txn(&self) {
+ let wake = {
+ let mut inner = self.inner.lock();
+ if inner.outstanding_txns == 0 {
+ pr_err!("outstanding_txns underflow");
+ return;
+ }
+ inner.outstanding_txns -= 1;
+ inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
+ };
+
+ if wake {
+ self.freeze_wait.notify_all();
+ }
+ }
+
+ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
+ if info.enable == 0 {
+ let msgs = self.prepare_freeze_messages()?;
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = IsFrozen::No;
+ drop(inner);
+ msgs.send_messages();
+ return Ok(());
+ }
+
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = IsFrozen::InProgress;
+
+ if info.timeout_ms > 0 {
+ let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
+ while jiffies > 0 {
+ if inner.outstanding_txns == 0 {
+ break;
+ }
+
+ match self
+ .freeze_wait
+ .wait_interruptible_timeout(&mut inner, jiffies)
+ {
+ CondVarTimeoutResult::Signal { .. } => {
+ inner.is_frozen = IsFrozen::No;
+ return Err(ERESTARTSYS);
+ }
+ CondVarTimeoutResult::Woken { jiffies: remaining } => {
+ jiffies = remaining;
+ }
+ CondVarTimeoutResult::Timeout => {
+ jiffies = 0;
+ }
+ }
+ }
+ }
+
+ if inner.txns_pending_locked() {
+ inner.is_frozen = IsFrozen::No;
+ Err(EAGAIN)
+ } else {
+ drop(inner);
+ match self.prepare_freeze_messages() {
+ Ok(batch) => {
+ self.inner.lock().is_frozen = IsFrozen::Yes;
+ batch.send_messages();
+ Ok(())
+ }
+ Err(kernel::alloc::AllocError) => {
+ self.inner.lock().is_frozen = IsFrozen::No;
+ Err(ENOMEM)
+ }
+ }
+ }
+ }
+}
+
+fn get_frozen_status(data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+ info.sync_recv = 0;
+ info.async_recv = 0;
+ let mut found = false;
+
+ for ctx in crate::context::get_all_contexts()? {
+ ctx.for_each_proc(|proc| {
+ if proc.task.pid() == info.pid as _ {
+ found = true;
+ let inner = proc.inner.lock();
+ let txns_pending = inner.txns_pending_locked();
+ info.async_recv |= inner.async_recv as u32;
+ info.sync_recv |= inner.sync_recv as u32;
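+ // Bit 0 of `sync_recv` reports a received sync transaction; bit 1
+ // reports whether sync transactions are still pending.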
+ info.sync_recv |= (txns_pending as u32) << 1;
+ }
+ });
+ }
+
+ if found {
+ writer.write(&info)?;
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
+ let info = reader.read::<BinderFreezeInfo>()?;
+
+ // Very unlikely for there to be more than 3, since a process normally uses at most binder and
+ // hwbinder.
+ let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
+
+ let ctxs = crate::context::get_all_contexts()?;
+ for ctx in ctxs {
+ for proc in ctx.get_procs_with_pid(info.pid as i32)? {
+ procs.push(proc, GFP_KERNEL)?;
+ }
+ }
+
+ for proc in procs {
+ proc.ioctl_freeze(&info)?;
+ }
+ Ok(())
+}
+
+/// The ioctl handler.
+impl Process {
+ /// Ioctls that are write-only from the perspective of userspace.
+ ///
+ /// The kernel will only read from the pointer that userspace provided to us.
+ fn ioctl_write_only(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSliceReader,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ match cmd {
+ uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ uapi::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
+ uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+
+ /// Ioctls that are read/write from the perspective of userspace.
+ ///
+ /// The kernel will both read from and write to the pointer that userspace provided to us.
+ fn ioctl_write_read(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlice,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ uapi::BINDER_VERSION => this.version(data)?,
+ uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
+ uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+}
+
+/// The file operations supported by `Process`.
+impl Process {
+ pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
+ Self::new(ctx.into(), ARef::from(file.cred()))
+ }
+
+ pub(crate) fn release(this: Arc<Process>, _file: &File) {
+ let binderfs_file;
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_RELEASE;
+ binderfs_file = inner.binderfs_file.take();
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(this);
+ }
+
+ drop(binderfs_file);
+ }
+
+ pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_FLUSH;
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(Arc::from(this));
+ }
+ Ok(())
+ }
+
+ pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
+ use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
+ use kernel::uapi::{_IOC_READ, _IOC_WRITE};
+
+ crate::trace::trace_ioctl(cmd, arg);
+
+ let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
+
+ const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
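+
+ // For example, BINDER_WRITE_READ is defined with _IOWR, so it takes the
+ // `_IOC_READ_WRITE` arm, while BINDER_SET_MAX_THREADS (_IOW) takes the
+ // `_IOC_WRITE` arm.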
+
+ match _IOC_DIR(cmd) {
+ _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
+ _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
+ _ => Err(EINVAL),
+ }
+ }
+
+ pub(crate) fn mmap(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ vma: &mm::virt::VmaNew,
+ ) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ vma.set_dontcopy();
+ vma.set_mixedmap();
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ pub(crate) fn poll(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ table: PollTable<'_>,
+ ) -> Result<u32> {
+ let thread = this.get_current_thread()?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+/// Represents that a thread has registered with the `ready_threads` list of its process.
+///
+/// The destructor of this type will unregister the thread from the list of ready threads.
+pub(crate) struct Registration<'a> {
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
+ assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
+ // INVARIANT: We are pushing this thread to the right `ready_threads` list.
+ if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
+ guard.ready_threads.push_front(list_arc);
+ } else {
+ // It is an error to hit this branch, and it should not be reachable. We try to do
+ // something reasonable when the failure path happens. Most likely, the thread in
+ // question will sleep forever.
+ pr_err!("Same thread registered with `ready_threads` twice.");
+ }
+ Self { thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.thread.process.inner.lock();
+ // SAFETY: The thread has the invariant that we never push it to any other linked list than
+ // the `ready_threads` list of its parent process. Therefore, the thread is either in that
+ // list, or in no list.
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
+
+pub(crate) struct WithNodes<'a> {
+ pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
+ pub(crate) nodes: RBTree<u64, DArc<Node>>,
+}
+
+impl Drop for WithNodes<'_> {
+ fn drop(&mut self) {
+ core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
+ if self.nodes.iter().next().is_some() {
+ pr_err!("nodes array was modified while using lock_with_nodes\n");
+ }
+ }
+}
+
+pub(crate) enum GetWorkOrRegister<'a> {
+ Work(DLArc<dyn DeliverToRead>),
+ Register(Registration<'a>),
+}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
new file mode 100644
index 000000000000..07e1dec2ce63
--- /dev/null
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::{PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct ArrayRangeAllocator<T> {
+ /// This stores all ranges that are allocated. Unlike the tree-based allocator, we do *not*
+ /// store the free ranges.
+ ///
+ /// Sorted by offset.
+ pub(super) ranges: KVec<Range<T>>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+struct FindEmptyRes {
+ /// Which index in `ranges` should we insert the new range at?
+ ///
+ /// Inserting the new range at this index keeps `ranges` sorted.
+ insert_at_idx: usize,
+ /// Which offset should we insert the new range at?
+ insert_at_offset: usize,
+}
+
+impl<T> ArrayRangeAllocator<T> {
+ pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
+ Self {
+ ranges: alloc.ranges,
+ size,
+ free_oneway_space: size / 2,
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.ranges.len()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn is_full(&self) -> bool {
+ self.ranges.len() == self.ranges.capacity()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for range in &self.ranges {
+ seq_print!(
+ m,
+ " buffer {}: {} size {} pid {} oneway {}",
+ 0,
+ range.offset,
+ range.size,
+ range.state.pid(),
+ range.state.is_oneway(),
+ );
+ if let DescriptorState::Reserved(_) = range.state {
+ seq_print!(m, " reserved\n");
+ } else {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ Ok(())
+ }
+
+ /// Find somewhere to put a new range.
+ ///
+ /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
+ /// fragmentation isn't a big issue when we don't have many ranges.
+ ///
+ /// Returns the index that the new range should have in `self.ranges` after insertion.
+ fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
+ let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);
+
+ if size <= self.total_size() - after_last_range {
+ // We can put the range at the end, so just do that.
+ Some(FindEmptyRes {
+ insert_at_idx: self.ranges.len(),
+ insert_at_offset: after_last_range,
+ })
+ } else {
+ let mut end_of_prev = 0;
+ for (i, range) in self.ranges.iter().enumerate() {
+ // Does it fit before the i'th range?
+ if size <= range.offset - end_of_prev {
+ return Some(FindEmptyRes {
+ insert_at_idx: i,
+ insert_at_offset: end_of_prev,
+ });
+ }
+ end_of_prev = range.endpoint();
+ }
+ None
+ }
+ }
+
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ ) -> Result<usize> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ let FindEmptyRes {
+ insert_at_idx,
+ insert_at_offset,
+ } = self.find_empty_range(size).ok_or(ENOSPC)?;
+ self.free_oneway_space = new_oneway_space;
+
+ let new_range = Range {
+ offset: insert_at_offset,
+ size,
+ state: DescriptorState::new(is_oneway, debug_id, pid),
+ };
+ // Insert the value at the given index to keep the array sorted.
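+ // The unwrap cannot fail: when the array is full, the caller switches to the
+ // tree allocator before calling `reserve_new` again.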
+ self.ranges
+ .insert_within_capacity(insert_at_idx, new_range)
+ .ok()
+ .unwrap();
+
+ Ok(insert_at_offset)
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let i = self
+ .ranges
+ .iter()
+ .position(|range| range.offset == offset)
+ .ok_or(EINVAL)?;
+ let range = &self.ranges[i];
+
+ if let DescriptorState::Allocated(_) = range.state {
+ return Err(EPERM);
+ }
+
+ let size = range.size;
+ let offset = range.offset;
+
+ if range.state.is_oneway() {
+ self.free_oneway_space += size;
+ }
+
+ // This computes the range of pages that are no longer used by *any* allocated range. The
+ // caller will mark them as unused, which means that they can be freed if the system comes
+ // under memory pressure.
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ #[expect(clippy::collapsible_if)] // reads better like this
+ if offset % PAGE_SIZE != 0 {
+ if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
+ freed_range.start_page_idx -= 1;
+ }
+ }
+ if range.endpoint() % PAGE_SIZE != 0 {
+ let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
+ if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
+ freed_range.end_page_idx += 1;
+ }
+ }
+
+ self.ranges.remove(i)?;
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Reserved(reservation) = &range.state else {
+ return Err(ENOENT);
+ };
+
+ range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
+ Ok(())
+ }
+
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Allocated(allocation) = &mut range.state else {
+ return Err(ENOENT);
+ };
+
+ let data = allocation.take();
+ let debug_id = allocation.reservation.debug_id;
+ range.state = DescriptorState::Reserved(allocation.reservation.clone());
+ Ok((range.size, debug_id, data))
+ }
+
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for range in self.ranges.iter_mut() {
+ if let DescriptorState::Allocated(allocation) = &mut range.state {
+ callback(
+ range.offset,
+ range.size,
+ allocation.reservation.debug_id,
+ allocation.data.take(),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) struct EmptyArrayAlloc<T> {
+ ranges: KVec<Range<T>>,
+}
+
+impl<T> EmptyArrayAlloc<T> {
+ pub(crate) fn try_new(capacity: usize) -> Result<Self> {
+ Ok(Self {
+ ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
+ })
+ }
+}
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
new file mode 100644
index 000000000000..2301e2bc1a1f
--- /dev/null
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};
+
+mod tree;
+use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};
+
+mod array;
+use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};
+
+enum DescriptorState<T> {
+ Reserved(Reservation),
+ Allocated(Allocation<T>),
+}
+
+impl<T> DescriptorState<T> {
+ fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
+ DescriptorState::Reserved(Reservation {
+ debug_id,
+ is_oneway,
+ pid,
+ })
+ }
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.reservation.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
+ }
+ }
+}
+
+#[derive(Clone)]
+struct Reservation {
+ debug_id: usize,
+ is_oneway: bool,
+ pid: Pid,
+}
+
+impl Reservation {
+ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
+ Allocation {
+ data,
+ reservation: self,
+ }
+ }
+}
+
+struct Allocation<T> {
+ reservation: Reservation,
+ data: Option<T>,
+}
+
+impl<T> Allocation<T> {
+ fn deallocate(self) -> (Reservation, Option<T>) {
+ (self.reservation, self.data)
+ }
+
+ fn debug_id(&self) -> usize {
+ self.reservation.debug_id
+ }
+
+ fn take(&mut self) -> Option<T> {
+ self.data.take()
+ }
+}
+
+/// The array implementation must switch to the tree if it wants to go beyond this number of
+/// ranges.
+const TREE_THRESHOLD: usize = 8;
+
+/// Represents a range of pages that have just become completely free.
+#[derive(Copy, Clone)]
+pub(crate) struct FreedRange {
+ pub(crate) start_page_idx: usize,
+ pub(crate) end_page_idx: usize,
+}
+
+impl FreedRange {
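+ /// Worked example, assuming `PAGE_SIZE == 0x1000`: freeing the byte range
+ /// `0x1800..0x4800` gives `start_page_idx == 2` and `end_page_idx == 4`, so
+ /// only pages 2 and 3 lie entirely inside the freed range; the partial pages
+ /// at either end may still back neighboring allocations.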
+ fn interior_pages(offset: usize, size: usize) -> FreedRange {
+ FreedRange {
+ // Divide, rounding up.
+ start_page_idx: offset.div_ceil(PAGE_SIZE),
+ // Divide, rounding down.
+ end_page_idx: (offset + size) / PAGE_SIZE,
+ }
+ }
+}
+
+struct Range<T> {
+ offset: usize,
+ size: usize,
+ state: DescriptorState<T>,
+}
+
+impl<T> Range<T> {
+ fn endpoint(&self) -> usize {
+ self.offset + self.size
+ }
+}
+
+pub(crate) struct RangeAllocator<T> {
+ inner: Impl<T>,
+}
+
+enum Impl<T> {
+ Empty(usize),
+ Array(ArrayRangeAllocator<T>),
+ Tree(TreeRangeAllocator<T>),
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Self {
+ Self {
+ inner: Impl::Empty(size),
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(size) => size / 2,
+ Impl::Array(array) => array.free_oneway_space(),
+ Impl::Tree(tree) => tree.free_oneway_space(),
+ }
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(_size) => 0,
+ Impl::Array(array) => array.count_buffers(),
+ Impl::Tree(tree) => tree.count_buffers(),
+ }
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ match &self.inner {
+ Impl::Empty(_size) => Ok(()),
+ Impl::Array(array) => array.debug_print(m),
+ Impl::Tree(tree) => tree.debug_print(m),
+ }
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
+ match &mut self.inner {
+ Impl::Empty(size) => {
+ let empty_array = match args.empty_array_alloc.take() {
+ Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: true,
+ need_new_tree_alloc: false,
+ need_tree_alloc: false,
+ }))
+ }
+ };
+
+ self.inner = Impl::Array(empty_array);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) if array.is_full() => {
+ let allocs = match args.new_tree_alloc {
+ Some(ref mut allocs) => allocs,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: true,
+ need_tree_alloc: true,
+ }))
+ }
+ };
+
+ let new_tree =
+ TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);
+
+ self.inner = Impl::Tree(new_tree);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+ let offset =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected: false,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+ }))
+ }
+ Impl::Tree(tree) => {
+ let alloc = match args.tree_alloc {
+ Some(alloc) => alloc,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: false,
+ need_tree_alloc: true,
+ }));
+ }
+ };
+ let (offset, oneway_spam_detected) =
+ tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: None,
+ }))
+ }
+ }
+ }
+
+ /// Deletes the allocations at `offset`.
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_abort(offset),
+ Impl::Tree(tree) => {
+ let freed_range = tree.reservation_abort(offset)?;
+ if tree.is_empty() {
+ self.inner = Impl::Empty(tree.total_size());
+ }
+ Ok(freed_range)
+ }
+ }
+ }
+
+ /// Called when an allocation is no longer in use by the kernel.
+ ///
+ /// The value in `data` will be stored, if any. A mutable reference is used to avoid dropping
+ /// the `T` when an error is returned.
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_commit(offset, data),
+ Impl::Tree(tree) => tree.reservation_commit(offset, data),
+ }
+ }
+
+ /// Called when the kernel starts using an allocation.
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reserve_existing(offset),
+ Impl::Tree(tree) => tree.reserve_existing(offset),
+ }
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ match &mut self.inner {
+ Impl::Empty(_size) => {}
+ Impl::Array(array) => array.take_for_each(callback),
+ Impl::Tree(tree) => tree.take_for_each(callback),
+ }
+ }
+}
+
+/// The arguments for `reserve_new`.
+#[derive(Default)]
+pub(crate) struct ReserveNewArgs<T> {
+ pub(crate) size: usize,
+ pub(crate) is_oneway: bool,
+ pub(crate) debug_id: usize,
+ pub(crate) pid: Pid,
+ pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
+ pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// The return type of `ReserveNew`.
+pub(crate) enum ReserveNew<T> {
+ Success(ReserveNewSuccess<T>),
+ NeedAlloc(ReserveNewNeedAlloc<T>),
+}
+
+/// Returned by `reserve_new` when the reservation was successful.
+pub(crate) struct ReserveNewSuccess<T> {
+ pub(crate) offset: usize,
+ pub(crate) oneway_spam_detected: bool,
+
+ // If the user supplied an allocation that we did not end up using, then we return it here.
+ // The caller will kfree it outside of the lock.
+ _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ _new_tree_alloc: Option<FromArrayAllocs<T>>,
+ _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// Returned by `reserve_new` to request the caller to make an allocation before calling the method
+/// again.
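+///
+/// A minimal retry-loop sketch of the intended calling convention (the
+/// `lock_alloc` helper is hypothetical, not part of this patch): the
+/// allocator lives behind a lock, and `make_alloc` performs the `GFP_KERNEL`
+/// allocations after the lock has been dropped.
+///
+/// ```ignore
+/// let mut args = ReserveNewArgs { size, is_oneway, debug_id, pid, ..Default::default() };
+/// let offset = loop {
+///     match lock_alloc(&process).reserve_new(args)? {
+///         ReserveNew::Success(success) => break success.offset,
+///         ReserveNew::NeedAlloc(need) => args = need.make_alloc()?,
+///     }
+/// };
+/// ```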
+pub(crate) struct ReserveNewNeedAlloc<T> {
+ args: ReserveNewArgs<T>,
+ need_empty_array_alloc: bool,
+ need_new_tree_alloc: bool,
+ need_tree_alloc: bool,
+}
+
+impl<T> ReserveNewNeedAlloc<T> {
+ /// Make the necessary allocations for another call to `reserve_new`.
+ pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
+ if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
+ self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
+ self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_tree_alloc && self.args.tree_alloc.is_none() {
+ self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
+ }
+ Ok(self.args)
+ }
+}
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
new file mode 100644
index 000000000000..838fdd2b47ea
--- /dev/null
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::PAGE_SIZE,
+ prelude::*,
+ rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct TreeRangeAllocator<T> {
+ /// This collection contains descriptors for *both* ranges containing an allocation, *and* free
+ /// ranges between allocations. The free ranges get merged, so there are never two free ranges
+ /// next to each other.
+ tree: RBTree<usize, Descriptor<T>>,
+ /// Contains an entry for every free range in `self.tree`. This tree sorts the ranges by size,
+ /// letting us look up the smallest range whose size is at least some lower bound.
+ free_tree: RBTree<FreeKey, ()>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+impl<T> TreeRangeAllocator<T> {
+ pub(crate) fn from_array(
+ size: usize,
+ ranges: &mut KVec<Range<T>>,
+ alloc: &mut FromArrayAllocs<T>,
+ ) -> Self {
+ let mut tree = TreeRangeAllocator {
+ tree: RBTree::new(),
+ free_tree: RBTree::new(),
+ size,
+ free_oneway_space: size / 2,
+ };
+
+ let mut free_offset = 0;
+ for range in ranges.drain_all() {
+ let free_size = range.offset - free_offset;
+ if free_size > 0 {
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree.insert(
+ tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)),
+ );
+ }
+ free_offset = range.endpoint();
+
+ if range.state.is_oneway() {
+ tree.free_oneway_space = tree.free_oneway_space.saturating_sub(range.size);
+ }
+
+ let free_res = alloc.free_tree.pop().unwrap();
+ let tree_node = alloc.tree.pop().unwrap();
+ let mut desc = Descriptor::new(range.offset, range.size);
+ desc.state = Some((range.state, free_res));
+ tree.tree.insert(tree_node.into_node(range.offset, desc));
+ }
+
+ // After the last range, we may need a free range.
+ if free_offset < size {
+ let free_size = size - free_offset;
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree
+ .insert(tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)));
+ }
+
+ tree
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ let mut tree_iter = self.tree.values();
+ // There's always at least one range, because index zero is either the start of a free or
+ // allocated range.
+ let first_value = tree_iter.next().unwrap();
+ if tree_iter.next().is_some() {
+ // There are never two free ranges next to each other, so if there is more than one
+ // descriptor, then at least one of them must hold an allocated range.
+ return false;
+ }
+ // There is only one descriptor. Return true if it is for a free range.
+ first_value.state.is_none()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.tree
+ .values()
+ .filter(|desc| desc.state.is_some())
+ .count()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for desc in self.tree.values() {
+ let state = match &desc.state {
+ Some(state) => &state.0,
+ None => continue,
+ };
+ seq_print!(
+ m,
+ " buffer: {} size {} pid {}",
+ desc.offset,
+ desc.size,
+ state.pid(),
+ );
+ if state.is_oneway() {
+ seq_print!(m, " oneway");
+ }
+ match state {
+ DescriptorState::Reserved(_res) => {
+ seq_print!(m, " reserved\n");
+ }
+ DescriptorState::Allocated(_alloc) => {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ }
+ Ok(())
+ }
+
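+ /// Best-fit lookup: `free_tree` is keyed by `(size, offset)`, so taking the
+ /// lower bound of `(size, 0)` yields the smallest free range that still fits,
+ /// with ties on size broken by the lowest offset. For example, with free
+ /// ranges of sizes 3, 8 and 16, a request for 5 lands on the range of size 8.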
+ fn find_best_match(&mut self, size: usize) -> Option<&mut Descriptor<T>> {
+ let free_cursor = self.free_tree.cursor_lower_bound(&(size, 0))?;
+ let ((_, offset), ()) = free_cursor.current();
+ self.tree.get_mut(offset)
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ alloc: ReserveNewTreeAlloc<T>,
+ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+ return Err(ENOSPC);
+ }
+ Some(desc) => {
+ let found_size = desc.size;
+ let found_offset = desc.offset;
+
+ // In case we need to break up the descriptor
+ let new_desc = Descriptor::new(found_offset + size, found_size - size);
+ let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
+
+ desc.state = Some((
+ DescriptorState::new(is_oneway, debug_id, pid),
+ desc_node_res,
+ ));
+ desc.size = size;
+
+ (found_size, found_offset, tree_node, free_tree_node)
+ }
+ };
+ self.free_oneway_space = new_oneway_space;
+ self.free_tree.remove(&(found_size, found_off));
+
+ if found_size != size {
+ self.tree.insert(tree_node);
+ self.free_tree.insert(free_tree_node);
+ }
+
+ Ok((found_off, oneway_spam_detected))
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ let mut cursor = self.tree.cursor_lower_bound_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ EINVAL
+ })?;
+
+ let (_, desc) = cursor.current_mut();
+
+ if desc.offset != offset {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let (reservation, free_node_res) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => {
+ (None, Ok((reservation, free_node_res)))
+ }
+ None => {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (None, Err(EINVAL))
+ }
+ allocated => {
+ pr_warn!(
+ "EPERM from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (allocated, Err(EPERM))
+ }
+ })?;
+
+ let mut size = desc.size;
+ let mut offset = desc.offset;
+ let free_oneway_space_add = if reservation.is_oneway { size } else { 0 };
+
+ self.free_oneway_space += free_oneway_space_add;
+
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ // Compute how large the next free region needs to be to include one more page in
+ // the newly freed range.
+ let add_next_page_needed = match (offset + size) % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => PAGE_SIZE - unalign,
+ };
+ // Compute how large the previous free region needs to be to include one more page
+ // in the newly freed range.
+ let add_prev_page_needed = match offset % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => unalign,
+ };
+
+ // Merge next into current if next is free
+ let remove_next = match cursor.peek_next() {
+ Some((_, next)) if next.state.is_none() => {
+ if next.size >= add_next_page_needed {
+ freed_range.end_page_idx += 1;
+ }
+ self.free_tree.remove(&(next.size, next.offset));
+ size += next.size;
+ true
+ }
+ _ => false,
+ };
+
+ if remove_next {
+ let (_, desc) = cursor.current_mut();
+ desc.size = size;
+ cursor.remove_next();
+ }
+
+ // Merge current into prev if prev is free
+ match cursor.peek_prev_mut() {
+ Some((_, prev)) if prev.state.is_none() => {
+ if prev.size >= add_prev_page_needed {
+ freed_range.start_page_idx -= 1;
+ }
+ // merge previous with current, remove current
+ self.free_tree.remove(&(prev.size, prev.offset));
+ offset = prev.offset;
+ size += prev.size;
+ prev.size = size;
+ cursor.remove_current();
+ }
+ _ => {}
+ };
+
+ self.free_tree
+ .insert(free_node_res.into_node((size, offset), ()));
+
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ let desc = self.tree.get_mut(&offset).ok_or(ENOENT)?;
+
+ desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => (
+ Some((
+ DescriptorState::Allocated(reservation.allocate(data.take())),
+ free_node_res,
+ )),
+ Ok(()),
+ ),
+ other => (other, Err(ENOENT)),
+ })
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ let desc = self.tree.get_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ ENOENT
+ })?;
+
+ let (debug_id, data) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Allocated(allocation), free_node_res)) => {
+ let (reservation, data) = allocation.deallocate();
+ let debug_id = reservation.debug_id;
+ (
+ Some((DescriptorState::Reserved(reservation), free_node_res)),
+ Ok((debug_id, data)),
+ )
+ }
+ other => {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ (other, Err(ENOENT))
+ }
+ })?;
+
+ Ok((desc.size, debug_id, data))
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for (_, desc) in self.tree.iter_mut() {
+ if let Some((DescriptorState::Allocated(allocation), _)) = &mut desc.state {
+ callback(
+ desc.offset,
+ desc.size,
+ allocation.debug_id(),
+ allocation.take(),
+ );
+ }
+ }
+ }
+
+ /// Find the number and total size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
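+ ///
+ /// For example, with a 4 MiB mmap the async space is 2 MiB; spam detection
+ /// starts once free async space drops below `size / 10` (about 410 KiB), and
+ /// a caller is flagged if it holds more than 50 oneway buffers or more than
+ /// `size / 4` (1 MiB) of oneway allocations.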
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some((state, _)) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+}
+
+type TreeDescriptorState<T> = (DescriptorState<T>, FreeNodeRes);
+struct Descriptor<T> {
+ size: usize,
+ offset: usize,
+ state: Option<TreeDescriptorState<T>>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: None,
+ }
+ }
+
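+    /// Applies `f` to the current state, stores the state `f` returns, and
+    /// propagates its result. A sketch of the caller-side idiom:
+    ///
+    /// ```ignore
+    /// desc.try_change_state(|state| match state {
+    ///     Some(taken) => (None, Ok(taken)),  // take the state out
+    ///     other => (other, Err(EINVAL)),     // put it back unchanged
+    /// })?;
+    /// ```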
+ fn try_change_state<F, Data>(&mut self, f: F) -> Result<Data>
+ where
+ F: FnOnce(Option<TreeDescriptorState<T>>) -> (Option<TreeDescriptorState<T>>, Result<Data>),
+ {
+ let (new_state, result) = f(self.state.take());
+ self.state = new_state;
+ result
+ }
+}
+
+// (Descriptor.size, Descriptor.offset)
+type FreeKey = (usize, usize);
+type FreeNodeRes = RBTreeNodeReservation<FreeKey, ()>;
+
+/// An allocation for use by `reserve_new`.
+pub(crate) struct ReserveNewTreeAlloc<T> {
+ tree_node_res: RBTreeNodeReservation<usize, Descriptor<T>>,
+ free_tree_node_res: FreeNodeRes,
+ desc_node_res: FreeNodeRes,
+}
+
+impl<T> ReserveNewTreeAlloc<T> {
+ pub(crate) fn try_new() -> Result<Self> {
+ let tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let free_tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let desc_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ Ok(Self {
+ tree_node_res,
+ free_tree_node_res,
+ desc_node_res,
+ })
+ }
+
+ fn initialize(
+ self,
+ desc: Descriptor<T>,
+ ) -> (
+ RBTreeNode<usize, Descriptor<T>>,
+ RBTreeNode<FreeKey, ()>,
+ FreeNodeRes,
+ ) {
+ let size = desc.size;
+ let offset = desc.offset;
+ (
+ self.tree_node_res.into_node(offset, desc),
+ self.free_tree_node_res.into_node((size, offset), ()),
+ self.desc_node_res,
+ )
+ }
+}
+
+/// An allocation for creating a tree from an `ArrayRangeAllocator`.
+pub(crate) struct FromArrayAllocs<T> {
+ tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
+ free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
+}
+
+impl<T> FromArrayAllocs<T> {
+ pub(crate) fn try_new(len: usize) -> Result<Self> {
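+ // An array with `len` ranges converts into at most `len` allocated
+ // descriptors interleaved with `len + 1` free ranges.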
+ let num_descriptors = 2 * len + 1;
+
+ let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ Ok(Self { tree, free_tree })
+ }
+}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
new file mode 100644
index 000000000000..31806890ed1a
--- /dev/null
+++ b/drivers/android/binder/rust_binder.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_RUST_BINDER_H
+#define _LINUX_RUST_BINDER_H
+
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * These symbols are exposed by `rust_binderfs.c` and exist here so that Rust
+ * Binder can call them.
+ */
+int init_rust_binderfs(void);
+
+struct dentry;
+struct inode;
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
+void rust_binderfs_remove_file(struct dentry *dentry);
+
+#endif
diff --git a/drivers/android/binder/rust_binder_events.c b/drivers/android/binder/rust_binder_events.c
new file mode 100644
index 000000000000..488b1470060c
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* rust_binder_events.c
+ *
+ * Rust Binder tracepoints.
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "rust_binder.h"
+
+const char * const binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
+};
+
+const char * const binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
+};
+
+#define CREATE_TRACE_POINTS
+#define CREATE_RUST_TRACE_POINTS
+#include "rust_binder_events.h"
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
new file mode 100644
index 000000000000..2f3efbf9dba6
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_INCLUDE_PATH
+#define TRACE_SYSTEM rust_binder
+#define TRACE_INCLUDE_FILE rust_binder_events
+#define TRACE_INCLUDE_PATH ../drivers/android/binder
+
+#if !defined(_RUST_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_binder_ioctl,
+ TP_PROTO(unsigned int cmd, unsigned long arg),
+ TP_ARGS(cmd, arg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned long, arg)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ ),
+ TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
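+
+/*
+ * To observe this event at runtime (assuming tracefs is mounted at
+ * /sys/kernel/tracing):
+ *
+ *   echo 1 > /sys/kernel/tracing/events/rust_binder/rust_binder_ioctl/enable
+ *   cat /sys/kernel/tracing/trace_pipe
+ */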
+
+#endif /* _RUST_BINDER_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/android/binder/rust_binder_internal.h b/drivers/android/binder/rust_binder_internal.h
new file mode 100644
index 000000000000..78288fe7964d
--- /dev/null
+++ b/drivers/android/binder/rust_binder_internal.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* rust_binder_internal.h
+ *
+ * This file contains internal data structures used by Rust Binder. Mostly,
+ * these are type definitions used only by binderfs or things that Rust Binder
+ * defines and exports to binderfs.
+ *
+ * It does not include things exported by binderfs to Rust Binder since this
+ * file is not included as input to bindgen.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _LINUX_RUST_BINDER_INTERNAL_H
+#define _LINUX_RUST_BINDER_INTERNAL_H
+
+#define RUST_BINDERFS_SUPER_MAGIC 0x6c6f6f71
+
+#include <linux/seq_file.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+typedef void *rust_binder_context;
+
+/**
+ * struct binder_device - information about a binder device node
+ * @minor: the minor number used by this device
+ * @ctx: the Rust Context used by this device, or null for binder-control
+ *
+ * This is used as the private data for files directly in binderfs, but not
+ * files in the binder_logs subdirectory. This struct owns a refcount on `ctx`
+ * and the entry for `minor` in `binderfs_minors`. For binder-control `ctx` is
+ * null.
+ */
+struct binder_device {
+ int minor;
+ rust_binder_context ctx;
+};
+
+int rust_binder_stats_show(struct seq_file *m, void *unused);
+int rust_binder_state_show(struct seq_file *m, void *unused);
+int rust_binder_transactions_show(struct seq_file *m, void *unused);
+int rust_binder_proc_show(struct seq_file *m, void *pid);
+
+extern const struct file_operations rust_binder_fops;
+rust_binder_context rust_binder_new_context(char *name);
+void rust_binder_remove_context(rust_binder_context device);
+
+/**
+ * struct binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * struct binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+#endif /* _LINUX_RUST_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
new file mode 100644
index 000000000000..c79a9e742240
--- /dev/null
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Binder -- the Android IPC mechanism.
+#![recursion_limit = "256"]
+#![allow(
+ clippy::as_underscore,
+ clippy::ref_as_ptr,
+ clippy::ptr_as_ptr,
+ clippy::cast_lossless
+)]
+
+use kernel::{
+ bindings::{self, seq_file},
+ fs::File,
+ list::{ListArc, ListArcSafe, ListLinksSelfPtr, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::Arc,
+ task::Pid,
+ transmute::AsBytes,
+ types::ForeignOwnable,
+ uaccess::UserSliceWriter,
+};
+
+use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
+
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+mod allocation;
+mod context;
+mod deferred_close;
+mod defs;
+mod error;
+mod node;
+mod page_range;
+mod process;
+mod range_alloc;
+mod stats;
+mod thread;
+mod trace;
+mod transaction;
+
+#[allow(warnings)] // generated bindgen code
+mod binderfs {
+ use kernel::bindings::{dentry, inode};
+
+ extern "C" {
+ pub fn init_rust_binderfs() -> kernel::ffi::c_int;
+ }
+ extern "C" {
+ pub fn rust_binderfs_create_proc_file(
+ nodp: *mut inode,
+ pid: kernel::ffi::c_int,
+ ) -> *mut dentry;
+ }
+ extern "C" {
+ pub fn rust_binderfs_remove_file(dentry: *mut dentry);
+ }
+ pub type rust_binder_context = *mut kernel::ffi::c_void;
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct binder_device {
+ pub minor: kernel::ffi::c_int,
+ pub ctx: rust_binder_context,
+ }
+ impl Default for binder_device {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+ }
+}
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ authors: ["Wedson Almeida Filho", "Alice Ryhl"],
+ description: "Android Binder",
+ license: "GPL",
+}
+
+fn next_debug_id() -> usize {
+ static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
+
+ NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+}
+
+/// Provides a single place to write Binder return values via the
+/// supplied `UserSliceWriter`.
+pub(crate) struct BinderReturnWriter<'a> {
+ writer: UserSliceWriter,
+ thread: &'a Thread,
+}
+
+impl<'a> BinderReturnWriter<'a> {
+ fn new(writer: UserSliceWriter, thread: &'a Thread) -> Self {
+ BinderReturnWriter { writer, thread }
+ }
+
+ /// Write a return code back to user space.
+ /// Should be a `BR_` constant from [`defs`] e.g. [`defs::BR_TRANSACTION_COMPLETE`].
+ fn write_code(&mut self, code: u32) -> Result {
+ stats::GLOBAL_STATS.inc_br(code);
+ self.thread.process.stats.inc_br(code);
+ self.writer.write(&code)
+ }
+
+ /// Write something *other than* a return code to user space.
+ fn write_payload<T: AsBytes>(&mut self, payload: &T) -> Result {
+ self.writer.write(payload)
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+}
+
+/// Specifies how a type should be delivered to the read part of a BINDER_WRITE_READ ioctl.
+///
+/// When a value is pushed to the todo list for a process or thread, it is stored as a trait object
+/// with the type `Arc<dyn DeliverToRead>`. Trait objects are a Rust feature that lets you
+/// implement dynamic dispatch over many different types. This lets us store many different types
+/// in the todo list.
+trait DeliverToRead: ListArcSafe + Send + Sync {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+ /// immediately, or false if it should return to the caller before processing additional work
+ /// items.
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: DArc<Self>);
+
+ /// Should we use `wake_up_interruptible_sync` or `wake_up_interruptible` when scheduling this
+ /// work item?
+ ///
+ /// Generally only set to true for non-oneway transactions.
+ fn should_sync_wakeup(&self) -> bool;
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, transaction_prefix: &str) -> Result<()>;
+}
+
+// Wrapper around a `DeliverToRead` with linked list links.
+#[pin_data]
+struct DTRWrap<T: ?Sized> {
+ #[pin]
+ links: ListLinksSelfPtr<DTRWrap<dyn DeliverToRead>>,
+ #[pin]
+ wrapped: T,
+}
+kernel::list::impl_list_arc_safe! {
+ impl{T: ListArcSafe + ?Sized} ListArcSafe<0> for DTRWrap<T> {
+ tracked_by wrapped: T;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<dyn DeliverToRead> {
+ using ListLinksSelfPtr { self.links };
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for DTRWrap<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.wrapped
+ }
+}
+
+type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
+type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
+
+impl<T: ListArcSafe> DTRWrap<T> {
+ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- val,
+ })
+ }
+
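+ // Allocates a wrapped value directly; `arc_pin_init` below is the variant that takes a
+ // pin-initializer instead of an already-constructed value.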
+ fn arc_try_new(val: T) -> Result<DLArc<T>, kernel::alloc::AllocError> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped: val,
+ }),
+ GFP_KERNEL,
+ )
+ .map_err(|_| kernel::alloc::AllocError)
+ }
+
+ fn arc_pin_init(init: impl PinInit<T>) -> Result<DLArc<T>, kernel::error::Error> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- init,
+ }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+struct DeliverCode {
+ code: u32,
+ skip: AtomicBool,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for DeliverCode { untracked; }
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ skip: AtomicBool::new(false),
+ }
+ }
+
+ /// Disable this DeliverCode and make it do nothing.
+ ///
+ /// This is used instead of removing it from the work list, since `List::remove` is
+ /// unsafe, whereas this method is not.
+ fn skip(&self) {
+ self.skip.store(true, Ordering::Relaxed);
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ if !self.skip.load(Ordering::Relaxed) {
+ writer.write_code(self.code)?;
+ }
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}", prefix);
+ if self.skip.load(Ordering::Relaxed) {
+ seq_print!(m, "(skipped) ");
+ }
+ if self.code == defs::BR_TRANSACTION_COMPLETE {
+ seq_print!(m, "transaction complete\n");
+ } else {
+ seq_print!(m, "transaction error: {}\n", self.code);
+ }
+ Ok(())
+ }
+}
+
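+/// Rounds `value` up to the next multiple of the pointer size, returning `None` if the
+/// addition overflows. For example, with 8-byte pointers `ptr_align(9)` is `Some(16)`.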
+fn ptr_align(value: usize) -> Option<usize> {
+ let size = core::mem::size_of::<usize>() - 1;
+ Some(value.checked_add(size)? & !size)
+}
+
+// SAFETY: We call register in `init`.
+static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+
+struct BinderModule {}
+
+impl kernel::Module for BinderModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: The module initializer never runs twice, so we only call this once.
+ unsafe { crate::context::CONTEXTS.init() };
+
+ pr_warn!("Loaded Rust Binder.");
+
+ BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+
+ // SAFETY: The module is being loaded, so we can initialize binderfs.
+ unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
+
+ Ok(Self {})
+ }
+}
+
+/// Makes the inner type Sync.
+#[repr(transparent)]
+pub struct AssertSync<T>(T);
+// SAFETY: Used only to insert `file_operations` into a global, which is safe.
+unsafe impl<T> Sync for AssertSync<T> {}
+
+/// File operations that rust_binderfs.c can use.
+#[no_mangle]
+#[used]
+pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
+ // SAFETY: All zeroes is safe for the `file_operations` type.
+ let zeroed_ops = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+
+ let ops = kernel::bindings::file_operations {
+ owner: THIS_MODULE.as_ptr(),
+ poll: Some(rust_binder_poll),
+ unlocked_ioctl: Some(rust_binder_ioctl),
+ compat_ioctl: Some(bindings::compat_ptr_ioctl),
+ mmap: Some(rust_binder_mmap),
+ open: Some(rust_binder_open),
+ release: Some(rust_binder_release),
+ flush: Some(rust_binder_flush),
+ ..zeroed_ops
+ };
+ AssertSync(ops)
+};
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_new_context(
+ name: *const kernel::ffi::c_char,
+) -> *mut kernel::ffi::c_void {
+ // SAFETY: The caller will always provide a valid c string here.
+ let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
+ match Context::new(name) {
+ Ok(ctx) => Arc::into_foreign(ctx),
+ Err(_err) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
+ if !device.is_null() {
+ // SAFETY: The caller ensures that the `device` pointer came from a previous call to
+ // `rust_binder_new_context`.
+ let ctx = unsafe { Arc::<Context>::from_foreign(device) };
+ ctx.deregister();
+ drop(ctx);
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_open(
+ inode: *mut bindings::inode,
+ file_ptr: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
+ // `struct binder_device`.
+ let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
+
+ assert!(!device.is_null());
+
+ // SAFETY: The `rust_binderfs.c` file ensures that `device->ctx` holds a binder context when
+ // using the rust binder fops.
+ let ctx = unsafe { Arc::<Context>::borrow((*device).ctx) };
+
+ // SAFETY: The caller provides a valid file pointer to a new `struct file`.
+ let file = unsafe { File::from_raw_file(file_ptr) };
+ let process = match Process::open(ctx, file) {
+ Ok(process) => process,
+ Err(err) => return err.to_errno(),
+ };
+
+ // SAFETY: This is an `inode` for a newly created binder file.
+ match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } {
+ Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file),
+ Ok(None) => { /* pid already exists */ }
+ Err(err) => return err.to_errno(),
+ }
+
+ // SAFETY: This file is associated with Rust binder, so we own the `private_data` field.
+ unsafe { (*file_ptr).private_data = process.into_foreign() };
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_release(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let file = unsafe { File::from_raw_file(file) };
+ Process::release(process, file);
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the vma is valid.
+ let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_poll(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let fileref = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the `PollTable` is valid.
+ match Process::poll(f, fileref, unsafe { PollTable::from_raw(wait) }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_flush(
+ file: *mut bindings::file,
+ _id: bindings::fl_owner_t,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ match Process::flush(f) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_stats_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_stats_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_state_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_state_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_proc_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: Accessing the private field of `seq_file` is okay.
+ let pid = (unsafe { (*ptr).private }) as usize as Pid;
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_proc_show_impl(m, pid) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_transactions_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_transactions_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+fn rust_binder_transactions_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder transactions:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, false)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_stats_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder stats:\n");
+ stats::GLOBAL_STATS.debug_print("", m);
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print_stats(m, &ctx)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_state_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_proc_show_impl(m: &SeqFile, pid: Pid) -> Result<()> {
+ seq_print!(m, "binder proc state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_procs_with_pid(pid)?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+struct BinderfsProcFile(NonNull<bindings::dentry>);
+
+// SAFETY: A `BinderfsProcFile` only holds a dentry pointer and may be dropped from any thread.
+unsafe impl Send for BinderfsProcFile {}
+
+impl BinderfsProcFile {
+ /// # Safety
+ ///
+ /// Takes an inode from a newly created binder file.
+ unsafe fn new(nodp: *mut bindings::inode, pid: i32) -> Result<Option<Self>> {
+ // SAFETY: The caller passes an `inode` for a newly created binder file.
+ let dentry = unsafe { binderfs::rust_binderfs_create_proc_file(nodp, pid) };
+ match kernel::error::from_err_ptr(dentry) {
+ Ok(dentry) => Ok(NonNull::new(dentry).map(Self)),
+ Err(err) if err == EEXIST => Ok(None),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl Drop for BinderfsProcFile {
+ fn drop(&mut self) {
+ // SAFETY: This is a dentry from `rust_binderfs_create_proc_file` that has not been deleted yet.
+ unsafe { binderfs::rust_binderfs_remove_file(self.0.as_ptr()) };
+ }
+}
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
new file mode 100644
index 000000000000..c69026df775c
--- /dev/null
+++ b/drivers/android/binder/rust_binderfs.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/fs_parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "rust_binder.h"
+#include "rust_binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
+
+DEFINE_SHOW_ATTRIBUTE(rust_binder_stats);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_state);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_transactions);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_proc);
+
+char *rust_binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(rust_devices, rust_binder_devices_param, charp, 0444);
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum binderfs_param {
+ Opt_max,
+ Opt_stats_mode,
+};
+
+enum binderfs_stats_mode {
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
+};
+
+struct binder_features {
+ bool oneway_spam_detection;
+ bool extended_error;
+ bool freeze_notification;
+};
+
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
+};
+
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static struct binder_features binder_features = {
+ .oneway_spam_detection = true,
+ .extended_error = true,
+ .freeze_notification = true,
+};
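+
+/* Each feature above is reported to userspace through a corresponding file created in
+ * init_binder_features() below. */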
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: userspace buffer that receives information about the new device
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it. Minor numbers are limited and tracked globally in
+ * binderfs_minors. The function then allocates a new inode from the super
+ * block of the filesystem mount, stashes the struct binder_device in the
+ * i_private field of that inode, and attaches a dentry to it.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device = NULL;
+ rust_binder_context ctx = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+
+ ctx = rust_binder_new_context(req->name);
+ if (!ctx)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &rust_binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+ device->ctx = ctx;
+ device->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ dentry = simple_start_creating(root, req->name);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_make_persistent(dentry, inode);
+
+ fsnotify_create(root->d_inode, dentry);
+ simple_done_creating(dentry);
+
+ return 0;
+
+err:
+ kfree(device);
+ rust_binder_remove_context(ctx);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
+ * Return: %0 on success, negative errno on failure.
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, device->minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ /* ctx is null for binder-control, but this function ignores null pointers */
+ rust_binder_remove_context(device->ctx);
+
+ kfree(device);
+}
+
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
+
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
+ }
+
+ return 0;
+}
+
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
+
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
+ return 0;
+}
+
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
+
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
+ seq_puts(seq, ",stats=global");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .show_options = binderfs_show_options,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->minor = minor;
+ device->ctx = NULL;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ simple_inode_init_ts(ret);
+ }
+ return ret;
+}
+
+void rust_binderfs_remove_file(struct dentry *dentry)
+{
+ simple_recursive_removal(dentry, NULL);
+}
+
+static struct dentry *rust_binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode;
+
+ new_inode = binderfs_make_inode(parent->d_sb, S_IFREG | 0444);
+ if (!new_inode)
+ return ERR_PTR(-ENOMEM);
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(new_inode);
+ return dentry;
+ }
+
+ d_make_persistent(dentry, new_inode);
+ fsnotify_create(parent->d_inode, dentry);
+ simple_done_creating(dentry);
+ return dentry;
+}
+
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid)
+{
+ struct binderfs_info *info = nodp->i_sb->s_fs_info;
+ struct dentry *dir = info->proc_log_dir;
+ char strbuf[20 + 1];
+ void *data = (void *)(unsigned long) pid;
+
+ if (!dir)
+ return NULL;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", pid);
+ return rust_binderfs_create_file(dir, strbuf, &rust_binder_proc_fops, data);
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode;
+
+ new_inode = binderfs_make_inode(parent->d_sb, S_IFDIR | 0755);
+ if (!new_inode)
+ return ERR_PTR(-ENOMEM);
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(new_inode);
+ return dentry;
+ }
+
+ inc_nlink(parent->d_inode);
+ set_nlink(new_inode, 2);
+ d_make_persistent(dentry, new_inode);
+ fsnotify_mkdir(parent->d_inode, dentry);
+ simple_done_creating(dentry);
+ return dentry;
+}
+
+static int binder_features_show(struct seq_file *m, void *unused)
+{
+ bool *feature = m->private;
+
+ seq_printf(m, "%d\n", *feature);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(binder_features);
+
+static int init_binder_features(struct super_block *sb)
+{
+ struct dentry *dentry, *dir;
+
+ dir = binderfs_create_dir(sb->s_root, "features");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dentry = rust_binderfs_create_file(dir, "oneway_spam_detection",
+ &binder_features_fops,
+ &binder_features.oneway_spam_detection);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "stats",
+ &rust_binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "state",
+ &rust_binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "transactions",
+ &rust_binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = {};
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+ * filesystem mount. But binderfs does not allow the creation of any
+ * files, including device nodes. The only way to create binder device
+ * nodes is through the binder-control device, which userns root is
+ * explicitly allowed to use. So removing the SB_I_NODEV flag from
+ * s_iflags is both necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = RUST_BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ simple_inode_init_ts(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
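+ /* Create one binder device node per name in the comma-separated module parameter. */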
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ ret = init_binder_features(sb);
+ if (ret)
+ return ret;
+
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, binderfs_fill_super);
+}
+
+static void binderfs_fs_context_free(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ kfree(ctx);
+}
+
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
+
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * During inode eviction struct binderfs_info is needed.
+ * So first wipe the super_block then free struct binderfs_info.
+ */
+ kill_anon_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int init_rust_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "rust_binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/android/binder/stats.rs b/drivers/android/binder/stats.rs
new file mode 100644
index 000000000000..037002651941
--- /dev/null
+++ b/drivers/android/binder/stats.rs
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Keep track of statistics for binder_logs.
+
+use crate::defs::*;
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
+
+const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
+const BR_COUNT: usize = _IOC_NR(BR_TRANSACTION_PENDING_FROZEN) as usize + 1;
+
+pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
+
+pub(crate) struct BinderStats {
+ bc: [AtomicU32; BC_COUNT],
+ br: [AtomicU32; BR_COUNT],
+}
+
+impl BinderStats {
+ pub(crate) const fn new() -> Self {
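+ // A `const` item lets the array repeat expressions below duplicate the initializer
+ // even though `AtomicU32` is not `Copy`; every element gets its own independent atomic.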
+ #[expect(clippy::declare_interior_mutable_const)]
+ const ZERO: AtomicU32 = AtomicU32::new(0);
+
+ Self {
+ bc: [ZERO; BC_COUNT],
+ br: [ZERO; BR_COUNT],
+ }
+ }
+
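+ /// Increments the counter for the given `BC_` command code; out-of-range codes are
+ /// silently ignored.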
+ pub(crate) fn inc_bc(&self, bc: u32) {
+ let idx = _IOC_NR(bc) as usize;
+ if let Some(bc_ref) = self.bc.get(idx) {
+ bc_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn inc_br(&self, br: u32) {
+ let idx = _IOC_NR(br) as usize;
+ if let Some(br_ref) = self.br.get(idx) {
+ br_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn debug_print(&self, prefix: &str, m: &SeqFile) {
+ for (i, cnt) in self.bc.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, command_string(i), cnt);
+ }
+ }
+ for (i, cnt) in self.br.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, return_string(i), cnt);
+ }
+ }
+ }
+}
+
+mod strings {
+ use core::str::from_utf8_unchecked;
+ use kernel::str::{CStr, CStrExt as _};
+
+ extern "C" {
+ static binder_command_strings: [*const u8; super::BC_COUNT];
+ static binder_return_strings: [*const u8; super::BR_COUNT];
+ }
+
+ pub(super) fn command_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_command_strings` is always safe.
+ let c_str_ptr = unsafe { binder_command_strings[i] };
+ // SAFETY: The `binder_command_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.to_bytes();
+ // SAFETY: The `binder_command_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+
+ pub(super) fn return_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_return_strings` is always safe.
+ let c_str_ptr = unsafe { binder_return_strings[i] };
+ // SAFETY: The `binder_return_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.to_bytes();
+ // SAFETY: The `binder_return_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+}
+use strings::{command_string, return_string};
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
new file mode 100644
index 000000000000..1a8e6fdc0dc4
--- /dev/null
+++ b/drivers/android/binder/thread.rs
@@ -0,0 +1,1596 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Thread` type, which represents a userspace thread that is using
+//! binder.
+//!
+//! The `Process` object stores all of the threads in an rb tree.
+
+use kernel::{
+ bindings,
+ fs::{File, LocalFile},
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ security,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::{PollCondVar, PollTable},
+ sync::{Arc, SpinLock},
+ task::Task,
+ types::ARef,
+ uaccess::UserSlice,
+ uapi,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
+ defs::*,
+ error::BinderResult,
+ process::{GetWorkOrRegister, Process},
+ ptr_align,
+ stats::GLOBAL_STATS,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+};
+
+use core::{
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+
+/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
+/// call and is discarded when it returns.
+struct ScatterGatherState {
+ /// A struct that tracks the amount of unused buffer space.
+ unused_buffer_space: UnusedBufferSpace,
+ /// Scatter-gather entries to copy.
+ sg_entries: KVec<ScatterGatherEntry>,
+ /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
+ /// was processed and all of its ancestors. The array is in sorted order.
+ ancestors: KVec<usize>,
+}
+
+/// This entry specifies an additional buffer that should be copied using the scatter-gather
+/// mechanism.
+struct ScatterGatherEntry {
+ /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
+ obj_index: usize,
+ /// Offset in target buffer.
+ offset: usize,
+ /// User address in source buffer.
+ sender_uaddr: usize,
+ /// Number of bytes to copy.
+ length: usize,
+ /// The minimum offset of the next fixup in this buffer.
+ fixup_min_offset: usize,
+ /// The offsets within this buffer that contain pointers which should be translated.
+ pointer_fixups: KVec<PointerFixupEntry>,
+}
+
+/// This entry specifies that a fixup should happen at `target_offset` of the
+/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+/// and is applied later. Otherwise, if `skip` is zero, the size of the
+/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
+struct PointerFixupEntry {
+ /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+ skip: usize,
+ /// The translated pointer to write when `skip` is zero.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+}
+
+/// Return type of `apply_and_validate_fixup_in_parent`.
+struct ParentFixupInfo {
+ /// The index of the parent buffer in `sg_entries`.
+ parent_sg_index: usize,
+ /// The number of ancestors of the buffer.
+ ///
+ /// The buffer is considered an ancestor of itself, so this is always at
+ /// least one.
+ num_ancestors: usize,
+ /// New value of `fixup_min_offset` if this fixup is applied.
+ new_min_offset: usize,
+ /// The offset of the fixup in the target buffer.
+ target_offset: usize,
+}
+
+impl ScatterGatherState {
+ /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
+ /// to access a region in its parent buffer. These accesses have various
+ /// restrictions, which this method verifies.
+ ///
+ /// The `parent_offset` and `length` arguments describe the offset and
+ /// length of the access in the parent buffer.
+ ///
+ /// # Detailed restrictions
+ ///
+ /// Obviously the fixup must be in-bounds for the parent buffer.
+ ///
+ /// For safety reasons, we only allow fixups inside a buffer to happen
+ /// at increasing offsets; additionally, we only allow fixup on the last
+ /// buffer object that was verified, or one of its parents.
+ ///
+ /// Example of what is allowed:
+ ///
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = C, offset = 0)
+ /// E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ ///
+ /// Examples of what is not allowed:
+ ///
+ /// Decreasing offsets within the same parent:
+ /// A
+ /// C (parent = A, offset = 16)
+ /// B (parent = A, offset = 0) // decreasing offset within A
+ ///
+ /// Arcerring to a parent that wasn't the last object or any of its parents:
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = B, offset = 0) // B is not A or any of A's parents
+ fn validate_parent_fixup(
+ &self,
+ parent: usize,
+ parent_offset: usize,
+ length: usize,
+ ) -> Result<ParentFixupInfo> {
+ // Using `position` would also be correct, but `rposition` avoids
+ // quadratic running times.
+ let ancestors_i = self
+ .ancestors
+ .iter()
+ .copied()
+ .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
+ .ok_or(EINVAL)?;
+ let sg_idx = self.ancestors[ancestors_i];
+ let sg_entry = match self.sg_entries.get(sg_idx) {
+ Some(sg_entry) => sg_entry,
+ None => {
+ pr_err!(
+ "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
+ ancestors_i,
+ sg_idx,
+ self.sg_entries.len()
+ );
+ return Err(EINVAL);
+ }
+ };
+ if sg_entry.fixup_min_offset > parent_offset {
+ pr_warn!(
+ "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
+ sg_entry.fixup_min_offset,
+ parent_offset
+ );
+ return Err(EINVAL);
+ }
+ let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
+ if new_min_offset > sg_entry.length {
+ pr_warn!(
+ "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
+ new_min_offset,
+ sg_entry.length
+ );
+ return Err(EINVAL);
+ }
+ let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
+ // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
+ // most `self.ancestors.len()`, which also fits in a usize.
+ Ok(ParentFixupInfo {
+ parent_sg_index: sg_idx,
+ num_ancestors: ancestors_i + 1,
+ new_min_offset,
+ target_offset,
+ })
+ }
+}
+
+/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
+/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
+/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
+struct UnusedBufferSpace {
+ /// The start of the remaining space.
+ offset: usize,
+ /// The end of the remaining space.
+ limit: usize,
+}
+impl UnusedBufferSpace {
+ /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
+ /// into the buffer is returned.
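+ ///
+ /// Each claim is rounded up to pointer alignment, so with 8-byte pointers two successive
+ /// claims of 5 bytes return offsets 0 and 8.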
+ fn claim_next(&mut self, size: usize) -> Result<usize> {
+ // We require every chunk to be aligned.
+ let size = ptr_align(size).ok_or(EINVAL)?;
+ let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
+
+ if new_offset <= self.limit {
+ let offset = self.offset;
+ self.offset = new_offset;
+ Ok(offset)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
+
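+/// Result of pushing a work item to a thread. If the thread is dead, the item is handed
+/// back to the caller so it can be delivered elsewhere or cancelled.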
+pub(crate) enum PushWorkRes {
+ Ok,
+ FailedDead(DLArc<dyn DeliverToRead>),
+}
+
+impl PushWorkRes {
+ fn is_ok(&self) -> bool {
+ match self {
+ PushWorkRes::Ok => true,
+ PushWorkRes::FailedDead(_) => false,
+ }
+ }
+}
+
+/// The fields of `Thread` protected by the spinlock.
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines whether the looper should return.
+ looper_need_return: bool,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. Stored here
+ /// so that it can be reused.
+ reply_work: DArc<ThreadError>,
+
+ /// Work item used to deliver error codes to the current thread. Stored here so that it can be
+ /// reused.
+ return_work: DArc<ThreadError>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ /// List of work items to deliver to userspace.
+ work_list: List<DTRWrap<dyn DeliverToRead>>,
+ current_transaction: Option<DArc<Transaction>>,
+
+ /// Extended error information for this thread.
+ extended_error: ExtendedError,
+}
+
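+// Bit flags for `InnerThread::looper_flags`. A thread counts as a looper once it has
+// entered or registered; doing both marks the looper state as invalid.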
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_WAITING_PROC: u32 = 0x20;
+const LOOPER_POLL: u32 = 0x40;
+
+impl InnerThread {
+ fn new() -> Result<Self> {
+ fn next_err_id() -> u32 {
+ static EE_ID: AtomicU32 = AtomicU32::new(0);
+ EE_ID.fetch_add(1, Ordering::Relaxed)
+ }
+
+ Ok(Self {
+ looper_flags: 0,
+ looper_need_return: false,
+ is_dead: false,
+ process_work_list: false,
+ reply_work: ThreadError::try_new()?,
+ return_work: ThreadError::try_new()?,
+ work_list: List::new(),
+ current_transaction: None,
+ extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
+ })
+ }
+
+ fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ if self.is_dead {
+ PushWorkRes::FailedDead(work)
+ } else {
+ self.work_list.push_back(work);
+ self.process_work_list = true;
+ PushWorkRes::Ok
+ }
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
+ work.set_error_code(code);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread reply work is already in use.");
+ }
+ }
+
+ fn push_return_work(&mut self, reply: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
+ work.set_error_code(reply);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread return work is already in use.");
+ }
+ }
+
+ /// Used to push work items that do not need to be processed immediately and can wait until the
+ /// thread gets another work item.
+ fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ ///
+ /// The `thread` parameter should be the thread containing this `ThreadInner`.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread).cloned();
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue.
+ /// This is generally the case when the thread is registered as a looper and not part of a
+ /// transaction stack. But if there is local work, we want to return to userspace before we
+ /// deliver any remote work.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.process_work_list || self.looper_need_return {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+/// This represents a thread that's used with binder.
+#[pin_data]
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ pub(crate) task: ARef<Task>,
+ #[pin]
+ inner: SpinLock<InnerThread>,
+ #[pin]
+ work_condvar: PollCondVar,
+ /// Used to insert this thread into the process' `ready_threads` list.
+ ///
+ /// INVARIANT: May never be used for any list other than `self.process.ready_threads`.
+ #[pin]
+ links: ListLinks,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Thread {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Thread {
+ using ListLinks { self.links };
+ }
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let inner = InnerThread::new()?;
+
+ Arc::pin_init(
+ try_pin_init!(Thread {
+ id,
+ process,
+ task: ARef::from(&**kernel::current!()),
+ inner <- kernel::new_spinlock!(inner, "Thread::inner"),
+ work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+ links <- ListLinks::new(),
+ links_track <- AtomicTracker::new(),
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
+ let inner = self.inner.lock();
+
+ if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
+ seq_print!(
+ m,
+ " thread {}: l {:02x} need_return {}\n",
+ self.id,
+ inner.looper_flags,
+ inner.looper_need_return,
+ );
+ }
+
+ let mut t_opt = inner.current_transaction.as_ref();
+ while let Some(t) = t_opt {
+ if Arc::ptr_eq(&t.from, self) {
+ t.debug_print_inner(m, " outgoing transaction ");
+ t_opt = t.from_parent.as_ref();
+ } else if Arc::ptr_eq(&t.to, &self.process) {
+ t.debug_print_inner(m, " incoming transaction ");
+ t_opt = t.find_from(self);
+ } else {
+ t.debug_print_inner(m, " bad transaction ");
+ t_opt = None;
+ }
+ }
+
+ for work in &inner.work_list {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
+ let mut writer = data.writer();
+ let ee = self.inner.lock().extended_error;
+ writer.write(&ee)?;
+ Ok(())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ pub(crate) fn has_current_transaction(&self) -> bool {
+ self.inner.lock().current_transaction.is_some()
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ {
+ let mut inner = self.inner.lock();
+ if inner.looper_need_return {
+ return Ok(inner.pop_work());
+ }
+ }
+
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(EINTR);
+ }
+ if inner.looper_need_return {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+ if inner.looper_need_return {
+ drop(inner);
+ return Ok(self.process.get_work());
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ GetWorkOrRegister::Work(work) => return Ok(Some(work)),
+ GetWorkOrRegister::Register(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
+
+ if signal_pending || inner.looper_need_return {
+ // We need to return now. We need to pull the thread off the list of ready threads
+ // (by dropping `reg`), then check the state again after it's off the list to
+ // ensure that something was not queued in the meantime. If something has been
+ // queued, we just return it (instead of the error).
+ drop(inner);
+ drop(reg);
+
+ let res = match self.inner.lock().pop_work() {
+ Some(work) => Ok(Some(work)),
+ None if signal_pending => Err(EINTR),
+ None => Ok(None),
+ };
+ return res;
+ }
+ }
+ }
+
+ /// Push the provided work item to be delivered to user space via this thread.
+ ///
+ /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ let sync = work.should_sync_wakeup();
+
+ let res = self.inner.lock().push_work(work);
+
+ if res.is_ok() {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ res
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+ /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
+ pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ pub(crate) fn push_return_work(&self, reply: u32) {
+ self.inner.lock().push_return_work(reply);
+ }
+
+ fn translate_object(
+ &self,
+ obj_index: usize,
+ offset: usize,
+ object: BinderObjectRef<'_>,
+ view: &mut AllocationView<'_>,
+ allow_fds: bool,
+ sg_state: &mut ScatterGatherState,
+ ) -> BinderResult {
+ match object {
+ BinderObjectRef::Binder(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self
+ .process
+ .as_arc_borrow()
+ .get_node(ptr, cookie, flags, strong, self)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Handle(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Fd(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ let mut obj_write = BinderFdObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FD;
+ // This will be overwritten with the actual fd when the transaction is received.
+ obj_write.__bindgen_anon_1.fd = u32::MAX;
+ obj_write.cookie = obj.cookie;
+ view.write::<BinderFdObject>(offset, &obj_write)?;
+
+ const FD_FIELD_OFFSET: usize =
+ core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
+
+ let field_offset = offset + FD_FIELD_OFFSET;
+
+ view.alloc.info_add_fd(file, field_offset, false)?;
+ }
+ BinderObjectRef::Ptr(obj) => {
+ let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
+ let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
+ Ok(alloc_offset) => alloc_offset,
+ Err(err) => {
+ pr_warn!(
+ "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
+ sg_state.unused_buffer_space.offset,
+ sg_state.unused_buffer_space.limit,
+ obj_length,
+ );
+ return Err(err.into());
+ }
+ };
+
+ let sg_state_idx = sg_state.sg_entries.len();
+ sg_state.sg_entries.push(
+ ScatterGatherEntry {
+ obj_index,
+ offset: alloc_offset,
+ sender_uaddr: obj.buffer as _,
+ length: obj_length,
+ pointer_fixups: KVec::new(),
+ fixup_min_offset: 0,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
+
+ if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
+ sg_state.ancestors.clear();
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+ } else {
+ // Another buffer also has a pointer to this buffer, and we need to fix up
+ // that pointer too.
+
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(
+ parent_index,
+ parent_offset,
+ size_of::<u64>(),
+ )?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry.pointer_fixups.push(
+ PointerFixupEntry {
+ skip: 0,
+ pointer_value: buffer_ptr_in_user_space,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )?;
+ }
+
+ let mut obj_write = BinderBufferObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_PTR;
+ obj_write.flags = obj.flags;
+ obj_write.buffer = buffer_ptr_in_user_space;
+ obj_write.length = obj.length;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderBufferObject>(offset, &obj_write)?;
+ }
+ BinderObjectRef::Fda(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+ let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+ let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+ view.alloc.info_add_fd_reserve(num_fds)?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry
+ .pointer_fixups
+ .push(
+ PointerFixupEntry {
+ skip: fds_len,
+ pointer_value: 0,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)?;
+
+ let fda_uaddr = parent_entry
+ .sender_uaddr
+ .checked_add(parent_offset)
+ .ok_or(EINVAL)?;
+ let mut fda_bytes = KVec::new();
+ UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+ .read_all(&mut fda_bytes, GFP_KERNEL)?;
+
+ if fds_len != fda_bytes.len() {
+ pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
+ return Err(EINVAL.into());
+ }
+
+ for i in (0..fds_len).step_by(size_of::<u32>()) {
+ let fd = {
+ let mut fd_bytes = [0u8; size_of::<u32>()];
+ fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
+ u32::from_ne_bytes(fd_bytes)
+ };
+
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ // The `validate_parent_fixup` call ensures that this addition will not
+ // overflow.
+ view.alloc.info_add_fd(file, info.target_offset + i, true)?;
+ }
+ drop(fda_bytes);
+
+ let mut obj_write = BinderFdArrayObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FDA;
+ obj_write.num_fds = obj.num_fds;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderFdArrayObject>(offset, &obj_write)?;
+ }
+ }
+ Ok(())
+ }
+
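+ /// Copies the scatter-gather buffers into the allocation, splicing in the recorded pointer
+ /// fixups as it goes: a fixup with `skip == 0` has the translated pointer value written in
+ /// place, while a non-zero `skip` (used for fd arrays) leaves a hole to be filled when the
+ /// files are installed. Sender bytes covered by a fixup are skipped rather than copied.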
+ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
+ for sg_entry in &mut sg_state.sg_entries {
+ let mut end_of_previous_fixup = sg_entry.offset;
+ let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
+
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+ for fixup in &mut sg_entry.pointer_fixups {
+ let fixup_len = if fixup.skip == 0 {
+ size_of::<u64>()
+ } else {
+ fixup.skip
+ };
+
+ let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ pr_warn!(
+ "Fixups oob {} {} {} {}",
+ fixup.target_offset,
+ end_of_previous_fixup,
+ offset_end,
+ target_offset_end
+ );
+ return Err(EINVAL.into());
+ }
+
+ let copy_off = end_of_previous_fixup;
+ let copy_len = fixup.target_offset - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ if fixup.skip == 0 {
+ let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let Err(err) = res {
+ pr_warn!("Failed copying ptr into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ if let Err(err) = reader.skip(fixup_len) {
+ pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
+ return Err(err.into());
+ }
+ end_of_previous_fixup = target_offset_end;
+ }
+ let copy_off = end_of_previous_fixup;
+ let copy_len = offset_end - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying remainder into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ Ok(())
+ }
+
+ /// This method copies the payload of a transaction into the target process.
+ ///
+ /// The resulting payload will have several different components, which will be stored next to
+ /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
+ /// and those objects have to be translated so that they make sense to the target process.
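+ ///
+ /// The allocation is laid out as four consecutive, pointer-aligned regions: the payload
+ /// data, the offset array, the scatter-gather buffers, and (if requested) the security
+ /// context.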
+ pub(crate) fn copy_transaction_data(
+ &self,
+ to_process: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ debug_id: usize,
+ allow_fds: bool,
+ txn_security_ctx_offset: Option<&mut usize>,
+ ) -> BinderResult<NewAllocation> {
+ let trd = &tr.transaction_data;
+ let is_oneway = trd.flags & TF_ONE_WAY != 0;
+ let mut secctx = if let Some(offset) = txn_security_ctx_offset {
+ let secid = self.process.cred.get_secid();
+ let ctx = match security::SecurityCtx::from_secid(secid) {
+ Ok(ctx) => ctx,
+ Err(err) => {
+ pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
+ return Err(err.into());
+ }
+ };
+ Some((offset, ctx))
+ } else {
+ None
+ };
+
+ let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+ let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+ let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let aligned_secctx_size = match secctx.as_ref() {
+ Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+ None => 0,
+ };
+
+ // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = usize::max(
+ aligned_data_size
+ .checked_add(aligned_offsets_size)
+ .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .and_then(|sum| sum.checked_add(aligned_secctx_size))
+ .ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+ let mut alloc =
+ match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!(
+ "Failed to allocate buffer. len:{}, is_oneway:{}",
+ len,
+ is_oneway
+ );
+ return Err(err);
+ }
+ };
+
+ // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+ // all bit-patterns.
+ let trd_data_ptr = unsafe { &trd.data.ptr };
+ let mut buffer_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
+ let mut end_of_previous_object = 0;
+ let mut sg_state = None;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ {
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
+ alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+ }
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + aligned_offsets_size;
+
+ // This state is used for BINDER_TYPE_PTR objects.
+ let sg_state = sg_state.insert(ScatterGatherState {
+ unused_buffer_space: UnusedBufferSpace {
+ offset: offsets_end,
+ limit: len,
+ },
+ sg_entries: KVec::new(),
+ ancestors: KVec::new(),
+ });
+
+ // Traverse the objects specified.
+ let mut view = AllocationView::new(&mut alloc, data_size);
+ for (index, index_offset) in (offsets_start..offsets_end)
+ .step_by(size_of::<usize>())
+ .enumerate()
+ {
+ let offset = view.alloc.read(index_offset)?;
+
+ if offset < end_of_previous_object {
+ pr_warn!("Got transaction with invalid offset.");
+ return Err(EINVAL.into());
+ }
+
+ // Copy data between two objects.
+ if end_of_previous_object < offset {
+ view.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ offset - end_of_previous_object,
+ )?;
+ }
+
+ let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+ match self.translate_object(
+ index,
+ offset,
+ object.as_ref(),
+ &mut view,
+ allow_fds,
+ sg_state,
+ ) {
+ Ok(()) => end_of_previous_object = offset + object.size(),
+ Err(err) => {
+ pr_warn!("Error while translating object.");
+ return Err(err);
+ }
+ }
+
+ // Update the indexes containing objects to clean up.
+ let offset_after_object = index_offset + size_of::<usize>();
+ view.alloc
+ .set_info_offsets(offsets_start..offset_after_object);
+ }
+ }
+
+ // Copy remaining raw data.
+ alloc.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ data_size - end_of_previous_object,
+ )?;
+
+ if let Some(sg_state) = sg_state.as_mut() {
+ if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
+ pr_warn!("Failure in apply_sg: {:?}", err);
+ return Err(err);
+ }
+ }
+
+ if let Some((off_out, secctx)) = secctx.as_mut() {
+ if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
+ pr_warn!("Failed to write security context: {:?}", err);
+ return Err(err.into());
+ }
+ **off_out = secctx_off;
+ }
+ Ok(alloc)
+ }
+
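+ /// Walks up the transaction stack, delivering `BR_DEAD_REPLY` to each sender in turn, and
+ /// stops at the first sender that is still alive.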
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Err(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) -> bool {
+ if let Ok(transaction) = &reply {
+ transaction.set_outstanding(&mut self.process.inner.lock());
+ }
+
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Ok(work) => {
+ inner.push_work(work);
+ }
+ Err(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_sync();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a transaction stack, but it has since called into
+ /// another thread). The top is [`None`] if the thread is not currently participating in a
+ /// transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
+ let inner = self.inner.lock();
+ if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ pr_warn!("got new transaction with bad transaction stack");
+ return Err(EINVAL);
+ }
+ Ok(Some(cur.clone()))
+ } else {
+ Ok(None)
+ }
+ }
+
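+ /// Runs `inner` for the given transaction data, reporting any failure back to userspace as
+ /// return work (and, for reportable errors, via the extended error mechanism).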
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ if err.should_pr_warn() {
+ let mut ee = self.inner.lock().extended_error;
+ ee.command = err.reply;
+ ee.param = err.as_errno();
+ pr_warn!(
+ "Transaction failed: {:?} my_pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ }
+
+ self.push_return_work(err.reply);
+ }
+ }
+
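+ /// Handles `BC_TRANSACTION`/`BC_TRANSACTION_SG` for a synchronous transaction: looks up the
+ /// target node, pushes the transaction onto this thread's transaction stack, and submits it
+ /// together with a deferred `BR_TRANSACTION_COMPLETE`.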
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: Handle's type has no invalid bit patterns.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let completion = list_completion.clone_arc();
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ pr_warn!("Transaction stack changed during transaction!");
+ return Err(EINVAL.into());
+ }
+ inner.current_transaction = Some(transaction.clone_arc());
+ // We push the completion as deferred work so that we wait for the reply before
+ // returning to userland.
+ inner.push_work_deferred(list_completion);
+ }
+
+ if let Err(e) = transaction.submit() {
+ completion.skip();
+ // Define `transaction` first to drop it after `inner`.
+ let transaction;
+ let mut inner = self.inner.lock();
+ transaction = inner.current_transaction.take().unwrap();
+ inner.current_transaction = transaction.clone_next();
+ Err(e)
+ } else {
+ Ok(())
+ }
+ }
+
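+ /// Handles `BC_REPLY`/`BC_REPLY_SG`: pops the transaction being replied to and delivers the
+ /// reply to the thread that sent it. Even if building the reply fails, the sender is
+ /// notified so that the transaction always completes.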
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(EINVAL.into());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ let out = (|| -> BinderResult<_> {
+ let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Ok(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ pr_warn!(
+ "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
+ err
+ );
+ let reply = Err(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ });
+
+ out
+ }
+
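+ /// Handles a one-way (`TF_ONE_WAY`) transaction: no reply is expected, so the completion
+ /// code is pushed immediately rather than deferred until a reply arrives.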
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
+ // union is okay.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
+ let completion = list_completion.clone_arc();
+ self.inner.lock().push_work(list_completion);
+ match transaction.submit() {
+ Ok(()) => Ok(()),
+ Err(err) => {
+ completion.skip();
+ Err(err)
+ }
+ }
+ }
+
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size.saturating_sub(req.write_consumed);
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
+ let before = reader.len();
+ let cmd = reader.read::<u32>()?;
+ GLOBAL_STATS.inc_bc(cmd);
+ self.process.stats.inc_bc(cmd);
+ match cmd {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_TRANSACTION_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_REPLY => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_REPLY_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_FREE_BUFFER => {
+ let buffer = self.process.buffer_get(reader.read()?);
+ if let Some(buffer) = buffer {
+ if buffer.looper_need_return_on_free() {
+ self.inner.lock().looper_need_return = true;
+ }
+ drop(buffer);
+ }
+ }
+ BC_INCREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, false)?
+ }
+ BC_ACQUIRE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, true)?
+ }
+ BC_RELEASE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, true)?
+ }
+ BC_DECREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, false)?
+ }
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+ BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
+ BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
+ BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
+
+ // Fail if given an unknown error code.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+
+ Ok(())
+ }
+
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size.saturating_sub(req.read_consumed);
+ let mut writer = BinderReturnWriter::new(
+ UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
+ self,
+ );
+ let (in_pool, use_proc_queue) = {
+ let inner = self.inner.lock();
+ (inner.is_looper(), inner.should_use_process_work_queue())
+ };
+
+ let getter = if use_proc_queue {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ let mut has_noop_placeholder = false;
+ if req.read_consumed == 0 {
+ if let Err(err) = writer.write_code(BR_NOOP) {
+ pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
+ return Err(err);
+ }
+ has_noop_placeholder = true;
+ }
+
+ // Loop doing work while there is room in the buffer.
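+ // The check reserves room for a 32-bit return code plus a full
+ // `binder_transaction_data_secctx` payload.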
+ let initial_len = writer.len();
+ while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
+ Ok(true) => {}
+ Ok(false) => break,
+ Err(err) => {
+ return Err(err);
+ }
+ },
+ Ok(None) => {
+ break;
+ }
+ Err(err) => {
+ // Propagate the error if we haven't written anything else.
+ if err != EINTR && err != EAGAIN {
+ pr_warn!("Failure in work getter: {:?}", err);
+ }
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if has_noop_placeholder && in_pool && self.process.needs_thread() {
+ let mut writer =
+ UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
+ .writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // Go through the write buffer.
+ let mut ret = Ok(());
+ if req.write_size > 0 {
+ ret = self.write(&mut req);
+ if let Err(err) = ret {
+ pr_warn!(
+ "Write failure {:?} in pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ self.inner.lock().looper_need_return = false;
+ return ret;
+ }
+ }
+
+ // Go through the work queue.
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ if ret.is_err() && ret != Err(EINTR) {
+ pr_warn!(
+ "Read failure {:?} in pid:{}",
+ ret,
+ self.process.pid_in_current_ns()
+ );
+ }
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+
+ self.inner.lock().looper_need_return = false;
+
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
+ table.register_wait(file, &self.work_condvar);
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ /// Makes any in-progress call to `get_work` or `get_work_local` return immediately.
+ pub(crate) fn exit_looper(&self) {
+ let mut inner = self.inner.lock();
+ let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
+ if should_notify {
+ inner.looper_need_return = true;
+ }
+ drop(inner);
+
+ if should_notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ self.inner.lock().is_dead = true;
+
+ self.unwind_transaction_stack();
+
+ // Cancel all pending work items.
+ while let Ok(Some(work)) = self.get_work_local(false) {
+ work.into_arc().cancel();
+ }
+ }
+}
+
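+/// A one-shot error code owned by a thread and delivered to userspace through the thread's
+/// read loop. Writing the code resets it to `BR_OK`, which marks the slot unused again.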
+#[pin_data]
+struct ThreadError {
+ error_code: AtomicU32,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+impl ThreadError {
+ fn try_new() -> Result<DArc<Self>> {
+ DTRWrap::arc_pin_init(pin_init!(Self {
+ error_code: AtomicU32::new(BR_OK),
+ links_track <- AtomicTracker::new(),
+ }))
+ .map(ListArc::into_arc)
+ }
+
+ fn set_error_code(&self, code: u32) {
+ self.error_code.store(code, Ordering::Relaxed);
+ }
+
+ fn is_unused(&self) -> bool {
+ self.error_code.load(Ordering::Relaxed) == BR_OK
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let code = self.error_code.load(Ordering::Relaxed);
+ self.error_code.store(BR_OK, Ordering::Relaxed);
+ writer.write_code(code)?;
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}transaction error: {}\n",
+ prefix,
+ self.error_code.load(Ordering::Relaxed)
+ );
+ Ok(())
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for ThreadError {
+ tracked_by links_track: AtomicTracker;
+ }
+}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
new file mode 100644
index 000000000000..af0e4392805e
--- /dev/null
+++ b/drivers/android/binder/trace.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::ffi::{c_uint, c_ulong};
+use kernel::tracepoint::declare_trace;
+
+declare_trace! {
+ unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+}
+
+#[inline]
+pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
+ // SAFETY: Always safe to call.
+ unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
new file mode 100644
index 000000000000..4bd3c0e417eb
--- /dev/null
+++ b/drivers/android/binder/transaction.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, SpinLock},
+ task::Kuid,
+ time::{Instant, Monotonic},
+ types::ScopeGuard,
+};
+
+use crate::{
+ allocation::{Allocation, TranslatedFds},
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{Node, NodeRef},
+ process::{Process, ProcessInner},
+ ptr_align,
+ thread::{PushWorkRes, Thread},
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct Transaction {
+ pub(crate) debug_id: usize,
+ target_node: Option<DArc<Node>>,
+ pub(crate) from_parent: Option<DArc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ pub(crate) to: Arc<Process>,
+ #[pin]
+ allocation: SpinLock<Option<Allocation>>,
+ is_outstanding: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ sender_euid: Kuid,
+ txn_security_ctx_off: Option<usize>,
+ pub(crate) oneway_spam_detected: bool,
+ start_time: Instant<Monotonic>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Transaction { untracked; }
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ from_parent: Option<DArc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionDataSg,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let txn_security_ctx = node_ref.node.flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX != 0;
+ let mut txn_security_ctx_off = if txn_security_ctx { Some(0) } else { None };
+ let to = node_ref.node.owner.clone();
+ let mut alloc = match from.copy_transaction_data(
+ to.clone(),
+ tr,
+ debug_id,
+ allow_fds,
+ txn_security_ctx_off.as_mut(),
+ ) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ if !err.is_dead() {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ }
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_ONE_WAY != 0 {
+ if from_parent.is_some() {
+ pr_warn!("Oneway transaction should not be in a transaction stack.");
+ return Err(EINVAL.into());
+ }
+ alloc.set_info_oneway_node(node_ref.node.clone());
+ }
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ let target_node = node_ref.node.clone();
+ alloc.set_info_target_node(node_ref);
+ let data_address = alloc.ptr;
+
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: Some(target_node),
+ from_parent,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ allow_fds: bool,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)
+ {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: None,
+ from_parent: None,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address: alloc.ptr,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off: None,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_inner(&self, m: &SeqFile, prefix: &str) {
+ seq_print!(
+ m,
+ "{}{}: from {}:{} to {} code {:x} flags {:x} elapsed {}ms",
+ prefix,
+ self.debug_id,
+ self.from.process.task.pid(),
+ self.from.id,
+ self.to.task.pid(),
+ self.code,
+ self.flags,
+ self.start_time.elapsed().as_millis(),
+ );
+ if let Some(target_node) = &self.target_node {
+ seq_print!(m, " node {}", target_node.debug_id);
+ }
+ seq_print!(m, " size {}:{}\n", self.data_size, self.offsets_size);
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<DArc<Self>>) -> bool {
+ match (&self.from_parent, onext) {
+ (None, None) => true,
+ (Some(from_parent), Some(next)) => Arc::ptr_eq(from_parent, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<DArc<Self>> {
+ Some(self.from_parent.as_ref()?.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, &self.to) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<&DArc<Transaction>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction);
+ }
+
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
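+ /// Counts this transaction as outstanding on the target process. Idempotent: the counter
+ /// is only incremented on the first call.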
+ pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
+ // No race because this method is only called once.
+ if !self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(true, Ordering::Relaxed);
+ to_process.add_outstanding_txn();
+ }
+ }
+
+ /// Decrement `outstanding_txns` in `to` if it hasn't already been decremented.
+ fn drop_outstanding_txn(&self) {
+ // No race because this is called at most twice, and one of the calls is in the
+ // destructor, which is guaranteed to not race with any other operations on the
+ // transaction. It also cannot race with `set_outstanding`, since submission happens
+ // before delivery.
+ if self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(false, Ordering::Relaxed);
+ self.to.drop_outstanding_txn();
+ }
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread if there is one in the transaction
+ /// stack, otherwise uses the destination process.
+ ///
+ /// Not used for replies.
+ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
+ // Defined before `process_inner` so that the destructor runs after releasing the lock.
+ let mut _t_outdated;
+
+ let oneway = self.flags & TF_ONE_WAY != 0;
+ let process = self.to.clone();
+ let mut process_inner = process.inner.lock();
+
+ self.set_outstanding(&mut process_inner);
+
+ if oneway {
+ if let Some(target_node) = self.target_node.clone() {
+ if process_inner.is_frozen.is_frozen() {
+ process_inner.async_recv = true;
+ if self.flags & TF_UPDATE_TXN != 0 {
+ if let Some(t_outdated) =
+ target_node.take_outdated_transaction(&self, &mut process_inner)
+ {
+ // Save the transaction to be dropped after locks are released.
+ _t_outdated = t_outdated;
+ }
+ }
+ }
+ match target_node.submit_oneway(self, &mut process_inner) {
+ Ok(()) => {}
+ Err((err, work)) => {
+ drop(process_inner);
+ // Drop work after releasing process lock.
+ drop(work);
+ return Err(err);
+ }
+ }
+
+ if process_inner.is_frozen.is_frozen() {
+ return Err(BinderError::new_frozen_oneway());
+ } else {
+ return Ok(());
+ }
+ } else {
+ pr_err!("Failed to submit oneway transaction to node.");
+ }
+ }
+
+ if process_inner.is_frozen.is_frozen() {
+ process_inner.sync_recv = true;
+ return Err(BinderError::new_frozen());
+ }
+
+ let res = if let Some(thread) = self.find_target_thread() {
+ match thread.push_work(self) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
+ }
+ } else {
+ process_inner.push_work(self)
+ };
+ drop(process_inner);
+
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ // Drop work after releasing process lock.
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ /// Check whether one oneway transaction can supersede another.
+ pub(crate) fn can_replace(&self, old: &Transaction) -> bool {
+ if self.from.process.task.pid() != old.from.process.task.pid() {
+ return false;
+ }
+
+ if self.flags & old.flags & (TF_ONE_WAY | TF_UPDATE_TXN) != (TF_ONE_WAY | TF_UPDATE_TXN) {
+ return false;
+ }
+
+ let target_node_match = match (self.target_node.as_ref(), old.target_node.as_ref()) {
+ (None, None) => true,
+ (Some(tn1), Some(tn2)) => Arc::ptr_eq(tn1, tn2),
+ _ => false,
+ };
+
+ self.code == old.code && self.flags == old.flags && target_node_match
+ }
+
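+ /// Takes the allocation and translates the file descriptors embedded in it. On success the
+ /// allocation is put back; on failure it is dropped eagerly so that the buffer is freed.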
+ fn prepare_file_list(&self) -> Result<TranslatedFds> {
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ match alloc.translate_fds() {
+ Ok(translated) => {
+ *self.allocation.lock() = Some(alloc);
+ Ok(translated)
+ }
+ Err(err) => {
+ // Free the allocation eagerly.
+ drop(alloc);
+ Err(err)
+ }
+ }
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ self.drop_outstanding_txn();
+ });
+
+ let files = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, we send a reply back to the sender and ignore the
+ // transaction on the recipient.
+ return Ok(true);
+ };
+
+ let mut tr_sec = BinderTransactionDataSecctx::default();
+ let tr = tr_sec.tr_data();
+ if let Some(target_node) = &self.target_node {
+ let (ptr, cookie) = target_node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size).unwrap()) as _;
+ }
+ tr.sender_euid = self.sender_euid.into_uid_in_current_ns();
+ tr.sender_pid = 0;
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ // Not a reply and not one-way.
+ tr.sender_pid = self.from.process.pid_in_current_ns();
+ }
+ let code = if self.target_node.is_none() {
+ BR_REPLY
+ } else if self.txn_security_ctx_off.is_some() {
+ BR_TRANSACTION_SEC_CTX
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write_code(code)?;
+ if let Some(off) = self.txn_security_ctx_off {
+ tr_sec.secctx = (self.data_address + off) as u64;
+ writer.write_payload(&tr_sec)?;
+ } else {
+ writer.write_payload(&*tr)?;
+ }
+
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ // Dismiss the guard that would complete the transaction with a failure. No failure
+ // paths are allowed from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit files, and set FDs in FDA to be closed on buffer free.
+ let close_on_free = files.commit();
+ alloc.set_info_close_on_free(close_on_free);
+
+ // It is now the user's responsibility to clear the allocation.
+ alloc.keep_alive();
+
+ self.drop_outstanding_txn();
+
+ // When this is not a reply and not a oneway transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.target_node.is_some() && tr_sec.transaction_data.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: DArc<Self>) {
+ let allocation = self.allocation.lock().take();
+ drop(allocation);
+
+ // If this is not a reply or oneway transaction, then send a dead reply.
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ self.drop_outstanding_txn();
+ }
+
+ fn should_sync_wakeup(&self) -> bool {
+ self.flags & TF_ONE_WAY == 0
+ }
+
+ fn debug_print(&self, m: &SeqFile, _prefix: &str, tprefix: &str) -> Result<()> {
+ self.debug_print_inner(m, tprefix);
+ Ok(())
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for Transaction {
+ fn drop(self: Pin<&mut Self>) {
+ self.drop_outstanding_txn();
+ }
+}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2e1f261ec5c8..979c96b74cad 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,10 +23,11 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
+#include <kunit/visibility.h>
#include "binder_alloc.h"
#include "binder_trace.h"
-struct list_lru binder_freelist;
+static struct list_lru binder_freelist;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
@@ -57,13 +58,14 @@ static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
-static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
+VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_buffer_size);
static void binder_insert_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *new_buffer)
@@ -167,34 +169,31 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
unsigned long user_ptr)
{
- struct binder_buffer *buffer;
-
- spin_lock(&alloc->lock);
- buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
- return buffer;
+ guard(mutex)(&alloc->mutex);
+ return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +201,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(alloc->freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
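+/*
+ * Look up the page already installed at @addr in the target mm without
+ * faulting anything in. Returns NULL if no page is present.
+ */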
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault it in; just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
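+/*
+ * Insert @page into the target mm at @addr, trying the per-VMA lock first and
+ * falling back to mmap_lock. Fails with -ESRCH once the binder region is no
+ * longer mapped.
+ */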
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
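+/*
+ * Allocate a zeroed highmem page and attach the shrinker metadata that links
+ * it back to its binder_alloc and page index via page->private.
+ */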
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +362,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +372,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +392,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +401,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(alloc->freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +422,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +655,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +676,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +687,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -618,6 +697,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
out:
return buffer;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_new_buf);
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
@@ -674,8 +754,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +814,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,18 +864,19 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_buf);
/**
* binder_alloc_mmap_handler() - map virtual address space for proc
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
- alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
- sizeof(alloc->pages[0]),
- GFP_KERNEL);
- if (alloc->pages == NULL) {
+ alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
+ sizeof(alloc->pages[0]),
+ GFP_KERNEL);
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,22 +932,22 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
err_alloc_buf_struct_failed:
- kfree(alloc->pages);
+ kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -884,7 +959,7 @@ err_invalid_mm:
failure_string, ret);
return ret;
}
-
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_mmap_handler);
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,23 +1000,27 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(alloc->freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
- kfree(alloc->pages);
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
+ kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -949,6 +1028,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
"%s: %d buffers %d, pages %d\n",
__func__, alloc->pid, buffers, page_count);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_deferred_release);
/**
* binder_alloc_print_allocated() - print buffer info
@@ -964,17 +1044,16 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,9 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
return count;
}
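The guard(mutex)(...) calls above come from the kernel's scope-based cleanup support (<linux/cleanup.h>): the mutex is released automatically when the enclosing scope exits. For comparison, a sketch of the open-coded equivalent of the count helper:

/* Open-coded equivalent: the unlock must appear on every return path. */
static int example_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);

	return count;
}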
@@ -1036,18 +1114,19 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);
/**
* binder_alloc_free_page() - shrinker callback to free pages
* @item: item to free
- * @lock: lock protecting the item
+ * @lru: list_lru instance of the item
* @cb_arg: callback argument
*
* Called from list_lru_walk() in binder_shrink_scan() to free
@@ -1055,44 +1134,54 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock,
void *cb_arg)
- __must_hold(lock)
+ __must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
- spin_unlock(lock);
+ spin_unlock(&lru->lock);
if (vma) {
trace_binder_unmap_user_start(alloc, index);
@@ -1102,23 +1191,29 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
- spin_lock(lock);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
return LRU_SKIP;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_page);
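The shrinker path above prefers the per-VMA lock and falls back to mmap_read_trylock() only when that misses. A minimal sketch of the pattern, with a hypothetical helper name:

/* Hypothetical helper: look up and lock the VMA covering @addr. */
static struct vm_area_struct *example_lock_vma(struct mm_struct *mm,
					       unsigned long addr,
					       bool *mm_locked)
{
	struct vm_area_struct *vma;

	*mm_locked = false;
	vma = lock_vma_under_rcu(mm, addr);	/* per-VMA read lock */
	if (vma)
		return vma;

	if (!mmap_read_trylock(mm))		/* contended: caller skips item */
		return NULL;
	*mm_locked = true;
	return vma_lookup(mm, addr);		/* may be NULL if unmapped */
}

Unlocking is then conditional, exactly as in the paths above: vma_end_read() for the per-VMA case, mmap_read_unlock() otherwise.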
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
@@ -1135,6 +1230,18 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static struct shrinker *binder_shrinker;
+VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc,
+ struct list_lru *freelist)
+{
+ alloc->pid = current->group_leader->pid;
+ alloc->mm = current->mm;
+ mmgrab(alloc->mm);
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+ alloc->freelist = freelist;
+}
+EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);
+
/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
@@ -1144,11 +1251,7 @@ static struct shrinker *binder_shrinker;
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->pid = current->group_leader->pid;
- alloc->mm = current->mm;
- mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
- INIT_LIST_HEAD(&alloc->buffers);
+ __binder_alloc_init(alloc, &binder_freelist);
}
int binder_alloc_shrinker_init(void)
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 70387234477e..d6f1f6f2d00e 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,13 +9,12 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>
-extern struct list_lru binder_freelist;
struct binder_transaction;
/**
@@ -59,34 +58,44 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index: index in @alloc->pages[] of the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
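page_to_lru() assumes each installed page's private field holds a pointer to its struct binder_shrinker_mdata. A hedged sketch of attaching that metadata (the helper is hypothetical; the actual install path is not shown in this hunk):

/* Sketch only: attach shrinker metadata so page_to_lru() can find it. */
static int example_attach_mdata(struct binder_alloc *alloc,
				struct page *page, unsigned long index)
{
	struct binder_shrinker_mdata *mdata;

	mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;

	mdata->alloc = alloc;
	mdata->page_index = index;
	INIT_LIST_HEAD(&mdata->lru);
	set_page_private(page, (unsigned long)mdata);
	return 0;
}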
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
+ * @freelist: lru list to use for free pages (invariant after init)
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped: whether the vm area is mapped; each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,29 +105,25 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
+ struct list_lru *freelist;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
-#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
-void binder_selftest_alloc(struct binder_alloc *alloc);
-#else
-static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
-#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock, void *cb_arg);
+ void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -151,12 +156,8 @@ void binder_alloc_print_pages(struct seq_file *m,
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
- size_t free_async_space;
-
- spin_lock(&alloc->lock);
- free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
- return free_async_space;
+ guard(mutex)(&alloc->mutex);
+ return alloc->free_async_space;
}
unsigned long
@@ -178,5 +179,11 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
binder_size_t buffer_offset,
size_t bytes);
+#if IS_ENABLED(CONFIG_KUNIT)
+void __binder_alloc_init(struct binder_alloc *alloc, struct list_lru *freelist);
+size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+#endif
+
#endif /* _LINUX_BINDER_ALLOC_H */
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
deleted file mode 100644
index 81442fe20a69..000000000000
--- a/drivers/android/binder_alloc_selftest.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* binder_alloc_selftest.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2017 Google, Inc.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm_types.h>
-#include <linux/err.h>
-#include "binder_alloc.h"
-
-#define BUFFER_NUM 5
-#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
-
-static bool binder_selftest_run = true;
-static int binder_selftest_failures;
-static DEFINE_MUTEX(binder_selftest_lock);
-
-/**
- * enum buf_end_align_type - Page alignment of a buffer
- * end with regard to the end of the previous buffer.
- *
- * In the pictures below, buf2 refers to the buffer we
- * are aligning. buf1 refers to previous buffer by addr.
- * Symbol [ means the start of a buffer, ] means the end
- * of a buffer, and | means page boundaries.
- */
-enum buf_end_align_type {
- /**
- * @SAME_PAGE_UNALIGNED: The end of this buffer is on
- * the same page as the end of the previous buffer and
- * is not page aligned. Examples:
- * buf1 ][ buf2 ][ ...
- * buf1 ]|[ buf2 ][ ...
- */
- SAME_PAGE_UNALIGNED = 0,
- /**
- * @SAME_PAGE_ALIGNED: When the end of the previous buffer
- * is not page aligned, the end of this buffer is on the
- * same page as the end of the previous buffer and is page
- * aligned. When the previous buffer is page aligned, the
- * end of this buffer is aligned to the next page boundary.
- * Examples:
- * buf1 ][ buf2 ]| ...
- * buf1 ]|[ buf2 ]| ...
- */
- SAME_PAGE_ALIGNED,
- /**
- * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
- * the page next to the end of the previous buffer and
- * is not page aligned. Examples:
- * buf1 ][ buf2 | buf2 ][ ...
- * buf1 ]|[ buf2 | buf2 ][ ...
- */
- NEXT_PAGE_UNALIGNED,
- /**
- * @NEXT_PAGE_ALIGNED: The end of this buffer is on
- * the page next to the end of the previous buffer and
- * is page aligned. Examples:
- * buf1 ][ buf2 | buf2 ]| ...
- * buf1 ]|[ buf2 | buf2 ]| ...
- */
- NEXT_PAGE_ALIGNED,
- /**
- * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
- * the page that follows the page after the end of the
- * previous buffer and is not page aligned. Examples:
- * buf1 ][ buf2 | buf2 | buf2 ][ ...
- * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
- */
- NEXT_NEXT_UNALIGNED,
- /**
- * @LOOP_END: The number of enum values in &buf_end_align_type.
- * It is used for controlling loop termination.
- */
- LOOP_END,
-};
-
-static void pr_err_size_seq(size_t *sizes, int *seq)
-{
- int i;
-
- pr_err("alloc sizes: ");
- for (i = 0; i < BUFFER_NUM; i++)
- pr_cont("[%zu]", sizes[i]);
- pr_cont("\n");
- pr_err("free seq: ");
- for (i = 0; i < BUFFER_NUM; i++)
- pr_cont("[%d]", seq[i]);
- pr_cont("\n");
-}
-
-static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- size_t size)
-{
- unsigned long page_addr;
- unsigned long end;
- int page_index;
-
- end = PAGE_ALIGN(buffer->user_data + size);
- page_addr = buffer->user_data;
- for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
- pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
- "lru" : "free", page_index);
- return false;
- }
- }
- return true;
-}
-
-static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffers[],
- size_t *sizes, int *seq)
-{
- int i;
-
- for (i = 0; i < BUFFER_NUM; i++) {
- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
- if (IS_ERR(buffers[i]) ||
- !check_buffer_pages_allocated(alloc, buffers[i],
- sizes[i])) {
- pr_err_size_seq(sizes, seq);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_free_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffers[],
- size_t *sizes, int *seq, size_t end)
-{
- int i;
-
- for (i = 0; i < BUFFER_NUM; i++)
- binder_alloc_free_buf(alloc, buffers[seq[i]]);
-
- for (i = 0; i < end / PAGE_SIZE; i++) {
- /**
- * Error message on a free page can be false positive
- * if binder shrinker ran during binder_alloc_free_buf
- * calls above.
- */
- if (list_empty(&alloc->pages[i].lru)) {
- pr_err_size_seq(sizes, seq);
- pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_free_page(struct binder_alloc *alloc)
-{
- int i;
- unsigned long count;
-
- while ((count = list_lru_count(&binder_freelist))) {
- list_lru_walk(&binder_freelist, binder_alloc_free_page,
- NULL, count);
- }
-
- for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
- pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
- "alloc" : "lru", i);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_alloc_free(struct binder_alloc *alloc,
- size_t *sizes, int *seq, size_t end)
-{
- struct binder_buffer *buffers[BUFFER_NUM];
-
- binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
-
- /* Allocate from lru. */
- binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- if (list_lru_count(&binder_freelist))
- pr_err("lru list should be empty but is not\n");
-
- binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
- binder_selftest_free_page(alloc);
-}
-
-static bool is_dup(int *seq, int index, int val)
-{
- int i;
-
- for (i = 0; i < index; i++) {
- if (seq[i] == val)
- return true;
- }
- return false;
-}
-
-/* Generate BUFFER_NUM factorial free orders. */
-static void binder_selftest_free_seq(struct binder_alloc *alloc,
- size_t *sizes, int *seq,
- int index, size_t end)
-{
- int i;
-
- if (index == BUFFER_NUM) {
- binder_selftest_alloc_free(alloc, sizes, seq, end);
- return;
- }
- for (i = 0; i < BUFFER_NUM; i++) {
- if (is_dup(seq, index, i))
- continue;
- seq[index] = i;
- binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
- }
-}
-
-static void binder_selftest_alloc_size(struct binder_alloc *alloc,
- size_t *end_offset)
-{
- int i;
- int seq[BUFFER_NUM] = {0};
- size_t front_sizes[BUFFER_NUM];
- size_t back_sizes[BUFFER_NUM];
- size_t last_offset, offset = 0;
-
- for (i = 0; i < BUFFER_NUM; i++) {
- last_offset = offset;
- offset = end_offset[i];
- front_sizes[i] = offset - last_offset;
- back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
- }
- /*
- * Buffers share the first or last few pages.
- * Only BUFFER_NUM - 1 buffer sizes are adjustable since
- * we need one giant buffer before getting to the last page.
- */
- back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
- binder_selftest_free_seq(alloc, front_sizes, seq, 0,
- end_offset[BUFFER_NUM - 1]);
- binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
-}
-
-static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
- size_t *end_offset, int index)
-{
- int align;
- size_t end, prev;
-
- if (index == BUFFER_NUM) {
- binder_selftest_alloc_size(alloc, end_offset);
- return;
- }
- prev = index == 0 ? 0 : end_offset[index - 1];
- end = prev;
-
- BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
-
- for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
- if (align % 2)
- end = ALIGN(end, PAGE_SIZE);
- else
- end += BUFFER_MIN_SIZE;
- end_offset[index] = end;
- binder_selftest_alloc_offset(alloc, end_offset, index + 1);
- }
-}
-
-/**
- * binder_selftest_alloc() - Test alloc and free of buffer pages.
- * @alloc: Pointer to alloc struct.
- *
- * Allocate BUFFER_NUM buffers to cover all page alignment cases,
- * then free them in all orders possible. Check that pages are
- * correctly allocated, put onto lru when buffers are freed, and
- * are freed when binder_alloc_free_page is called.
- */
-void binder_selftest_alloc(struct binder_alloc *alloc)
-{
- size_t end_offset[BUFFER_NUM];
-
- if (!binder_selftest_run)
- return;
- mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
- goto done;
- pr_info("STARTED\n");
- binder_selftest_alloc_offset(alloc, end_offset, 0);
- binder_selftest_run = false;
- if (binder_selftest_failures > 0)
- pr_info("%d tests FAILED\n", binder_selftest_failures);
- else
- pr_info("PASSED\n");
-
-done:
- mutex_unlock(&binder_selftest_lock);
-}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7270d4d22207..342574bfd28a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -3,7 +3,6 @@
#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H
-#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -14,6 +13,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
+#include "dbitmap.h"
struct binder_context {
struct binder_node *binder_context_mgr_node;
@@ -24,8 +24,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -82,7 +81,6 @@ extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
const struct file_operations *fops,
void *data);
-extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
@@ -95,7 +93,6 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
{
return NULL;
}
-static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
@@ -129,12 +126,13 @@ enum binder_stat_types {
BINDER_STAT_DEATH,
BINDER_STAT_TRANSACTION,
BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_FREEZE,
BINDER_STAT_COUNT
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1];
- atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
+ atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
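The br[]/bc[] arrays are sized to the highest return/command code and indexed by _IOC_NR(). A hedged sketch of counting a delivered return code (helper name hypothetical; real code validates the range first):

/* Count one BR_* return code; assumes cmd is within the br[] range. */
static void example_count_br(struct binder_stats *stats, unsigned int cmd)
{
	atomic_inc(&stats->br[_IOC_NR(cmd)]);
}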
@@ -159,6 +157,8 @@ struct binder_work {
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ BINDER_WORK_FROZEN_BINDER,
+ BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
} type;
};
@@ -275,6 +275,14 @@ struct binder_ref_death {
binder_uintptr_t cookie;
};
+struct binder_ref_freeze {
+ struct binder_work work;
+ binder_uintptr_t cookie;
+ bool is_frozen:1;
+ bool sent:1;
+ bool resend:1;
+};
+
/**
* struct binder_ref_data - binder_ref counts and id
* @debug_id: unique ID for the ref
@@ -307,6 +315,8 @@ struct binder_ref_data {
* @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
+ * @freeze: pointer to freeze notification (ref_freeze) if requested
+ * (protected by @node->lock)
*
* Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock.
@@ -323,6 +333,7 @@ struct binder_ref {
struct binder_proc *proc;
struct binder_node *node;
struct binder_ref_death *death;
+ struct binder_ref_freeze *freeze;
};
/**
@@ -368,12 +379,16 @@ struct binder_ref {
* @freeze_wait: waitqueue of processes waiting for all outstanding
* transactions to be processed
* (protected by @inner_lock)
+ * @dmap: dbitmap to manage available reference descriptors
+ * (protected by @outer_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
* (protected by @inner_lock)
+ * @delivered_freeze: list of delivered freeze notification
+ * (protected by @inner_lock)
* @max_threads: cap on number of binder threads
* (protected by @inner_lock)
* @requested_threads: number of binder threads requested but not
@@ -417,11 +432,12 @@ struct binder_proc {
bool sync_recv;
bool async_recv;
wait_queue_head_t freeze_wait;
-
+ struct dbitmap dmap;
struct list_head todo;
struct binder_stats stats;
struct list_head delivered_death;
- int max_threads;
+ struct list_head delivered_freeze;
+ u32 max_threads;
int requested_threads;
int requested_threads_started;
int tmp_ref;
@@ -521,8 +537,8 @@ struct binder_transaction {
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
+ unsigned is_async:1;
+ unsigned is_reply:1;
struct binder_buffer *buffer;
unsigned int code;
@@ -562,4 +578,20 @@ struct binder_object {
};
};
+/**
+ * Add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ */
+void binder_add_device(struct binder_device *device);
+
+/**
+ * Remove a binder device from binder_devices
+ * @device: the binder device to remove from the global list
+ */
+void binder_remove_device(struct binder_device *device);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+vm_fault_t binder_vm_fault(struct vm_fault *vmf);
+#endif
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
new file mode 100644
index 000000000000..81e8432b5904
--- /dev/null
+++ b/drivers/android/binder_netlink.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "binder_netlink.h"
+
+#include <uapi/linux/android/binder_netlink.h>
+
+/* Ops table for binder */
+static const struct genl_split_ops binder_nl_ops[] = {
+};
+
+static const struct genl_multicast_group binder_nl_mcgrps[] = {
+ [BINDER_NLGRP_REPORT] = { "report", },
+};
+
+struct genl_family binder_nl_family __ro_after_init = {
+ .name = BINDER_FAMILY_NAME,
+ .version = BINDER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = binder_nl_ops,
+ .n_split_ops = ARRAY_SIZE(binder_nl_ops),
+ .mcgrps = binder_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(binder_nl_mcgrps),
+};
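With the family registered, a notification could be multicast to the "report" group roughly as below; the command value 0 and the absence of attributes are assumptions for illustration, not part of this patch:

/* Sketch only: broadcast an empty message to the report group. */
static int example_send_report(void)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, 0 /* cmd */);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	genlmsg_end(skb, hdr);
	return genlmsg_multicast(&binder_nl_family, skb, 0,
				 BINDER_NLGRP_REPORT, GFP_KERNEL);
}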
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
new file mode 100644
index 000000000000..57399942a5e3
--- /dev/null
+++ b/drivers/android/binder_netlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_BINDER_GEN_H
+#define _LINUX_BINDER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/android/binder_netlink.h>
+
+enum {
+ BINDER_NLGRP_REPORT,
+};
+
+extern struct genl_family binder_nl_family;
+
+#endif /* _LINUX_BINDER_GEN_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..fa5eb61cf580 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -34,27 +34,6 @@ TRACE_EVENT(binder_ioctl,
TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);
-DECLARE_EVENT_CLASS(binder_lock_class,
- TP_PROTO(const char *tag),
- TP_ARGS(tag),
- TP_STRUCT__entry(
- __field(const char *, tag)
- ),
- TP_fast_assign(
- __entry->tag = tag;
- ),
- TP_printk("tag=%s", __entry->tag)
-);
-
-#define DEFINE_BINDER_LOCK_EVENT(name) \
-DEFINE_EVENT(binder_lock_class, name, \
- TP_PROTO(const char *func), \
- TP_ARGS(func))
-
-DEFINE_BINDER_LOCK_EVENT(binder_lock);
-DEFINE_BINDER_LOCK_EVENT(binder_locked);
-DEFINE_BINDER_LOCK_EVENT(binder_unlock);
-
DECLARE_EVENT_CLASS(binder_function_return_class,
TP_PROTO(int ret),
TP_ARGS(ret),
@@ -328,7 +307,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -423,6 +402,43 @@ TRACE_EVENT(binder_return,
"unknown")
);
+TRACE_EVENT(binder_netlink_report,
+ TP_PROTO(const char *context,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error),
+ TP_ARGS(context, t, data_size, error),
+ TP_STRUCT__entry(
+ __field(const char *, context)
+ __field(u32, error)
+ __field(int, from_pid)
+ __field(int, from_tid)
+ __field(int, to_pid)
+ __field(int, to_tid)
+ __field(bool, is_reply)
+ __field(unsigned int, flags)
+ __field(unsigned int, code)
+ __field(size_t, data_size)
+ ),
+ TP_fast_assign(
+ __entry->context = context;
+ __entry->error = error;
+ __entry->from_pid = t->from_pid;
+ __entry->from_tid = t->from_tid;
+ __entry->to_pid = t->to_proc ? t->to_proc->pid : 0;
+ __entry->to_tid = t->to_thread ? t->to_thread->pid : 0;
+ __entry->is_reply = t->is_reply;
+ __entry->flags = t->flags;
+ __entry->code = t->code;
+ __entry->data_size = data_size;
+ ),
+ TP_printk("from %d:%d to %d:%d context=%s error=%d is_reply=%d flags=0x%x code=0x%x size=%zu",
+ __entry->from_pid, __entry->from_tid,
+ __entry->to_pid, __entry->to_tid,
+ __entry->context, __entry->error, __entry->is_reply,
+ __entry->flags, __entry->code, __entry->data_size)
+);
+
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 3001d754ac36..b46bcb91072d 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -58,6 +58,8 @@ enum binderfs_stats_mode {
struct binder_features {
bool oneway_spam_detection;
bool extended_error;
+ bool freeze_notification;
+ bool transaction_report;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -74,6 +76,8 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
+ .freeze_notification = true,
+ .transaction_report = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -115,7 +119,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
struct dentry *dentry, *root;
struct binder_device *device;
char *name = NULL;
- size_t name_len;
struct inode *inode = NULL;
struct super_block *sb = ref_inode->i_sb;
struct binderfs_info *info = sb->s_fs_info;
@@ -159,9 +162,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode->i_gid = info->root_gid;
req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
- name_len = strlen(req->name);
- /* Make sure to include terminating NUL byte */
- name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
+ name = kstrdup(req->name, GFP_KERNEL);
if (!name)
goto err;
@@ -182,28 +183,17 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
}
root = sb->s_root;
- inode_lock(d_inode(root));
-
- /* look it up */
- dentry = lookup_one_len(name, root, name_len);
+ dentry = simple_start_creating(root, name);
if (IS_ERR(dentry)) {
- inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
goto err;
}
-
- if (d_really_is_positive(dentry)) {
- /* already exists */
- dput(dentry);
- inode_unlock(d_inode(root));
- ret = -EEXIST;
- goto err;
- }
-
inode->i_private = device;
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(root->d_inode, dentry);
- inode_unlock(d_inode(root));
+ simple_done_creating(dentry);
+
+ binder_add_device(device);
return 0;
@@ -221,6 +211,9 @@ err:
/**
* binder_ctl_ioctl - handle binder device node allocation requests
+ * @file: The file pointer for the binder-control device node.
+ * @cmd: The ioctl command.
+ * @arg: The ioctl argument.
*
* The request handler for the binder-control device. All requests operate on
* the binderfs mount the binder-control device resides in:
@@ -270,6 +263,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(device->context.name);
kfree(device);
}
@@ -406,12 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
if (!device)
return -ENOMEM;
- /* If we have already created a binder-control node, return. */
- if (info->control_dentry) {
- ret = 0;
- goto out;
- }
-
ret = -ENOMEM;
inode = new_inode(sb);
if (!inode)
@@ -447,7 +435,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_private = device;
info->control_dentry = dentry;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
return 0;
@@ -477,39 +466,6 @@ static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
return ret;
}
-static struct dentry *binderfs_create_dentry(struct dentry *parent,
- const char *name)
-{
- struct dentry *dentry;
-
- dentry = lookup_one_len(name, parent, strlen(name));
- if (IS_ERR(dentry))
- return dentry;
-
- /* Return error if the file/dir already exists. */
- if (d_really_is_positive(dentry)) {
- dput(dentry);
- return ERR_PTR(-EEXIST);
- }
-
- return dentry;
-}
-
-void binderfs_remove_file(struct dentry *dentry)
-{
- struct inode *parent_inode;
-
- parent_inode = d_inode(dentry->d_parent);
- inode_lock(parent_inode);
- if (simple_positive(dentry)) {
- dget(dentry);
- simple_unlink(parent_inode, dentry);
- d_delete(dentry);
- dput(dentry);
- }
- inode_unlock(parent_inode);
-}
-
struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
const struct file_operations *fops,
void *data)
@@ -519,28 +475,24 @@ struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
struct super_block *sb;
parent_inode = d_inode(parent);
- inode_lock(parent_inode);
- dentry = binderfs_create_dentry(parent, name);
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry))
- goto out;
+ return dentry;
sb = parent_inode->i_sb;
new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
if (!new_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
- goto out;
+ simple_done_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
new_inode->i_fop = fops;
new_inode->i_private = data;
- d_instantiate(dentry, new_inode);
+ d_make_persistent(dentry, new_inode);
fsnotify_create(parent_inode, dentry);
-
-out:
- inode_unlock(parent_inode);
- return dentry;
+ simple_done_creating(dentry);
+	return dentry; /* borrowed */
}
static struct dentry *binderfs_create_dir(struct dentry *parent,
@@ -551,30 +503,26 @@ static struct dentry *binderfs_create_dir(struct dentry *parent,
struct super_block *sb;
parent_inode = d_inode(parent);
- inode_lock(parent_inode);
- dentry = binderfs_create_dentry(parent, name);
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry))
- goto out;
+ return dentry;
sb = parent_inode->i_sb;
new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
if (!new_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
- goto out;
+ simple_done_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
new_inode->i_fop = &simple_dir_operations;
new_inode->i_op = &simple_dir_inode_operations;
set_nlink(new_inode, 2);
- d_instantiate(dentry, new_inode);
+ d_make_persistent(dentry, new_inode);
inc_nlink(parent_inode);
fsnotify_mkdir(parent_inode, dentry);
-
-out:
- inode_unlock(parent_inode);
+ simple_done_creating(dentry);
return dentry;
}
@@ -608,6 +556,18 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = binderfs_create_file(dir, "transaction_report",
+ &binder_features_fops,
+ &binder_features.transaction_report);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
@@ -778,7 +738,7 @@ static void binderfs_kill_super(struct super_block *sb)
* During inode eviction struct binderfs_info is needed.
* So first wipe the super_block then free struct binderfs_info.
*/
- kill_litter_super(sb);
+ kill_anon_super(sb);
if (info && info->ipc_ns)
put_ipc_ns(info->ipc_ns);
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
new file mode 100644
index 000000000000..c7299ce8b374
--- /dev/null
+++ b/drivers/android/dbitmap.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2024 Google LLC
+ *
+ * dbitmap - dynamically sized bitmap library.
+ *
+ * Used by the binder driver to optimize the allocation of the smallest
+ * available descriptor ID. Each bit in the bitmap represents the state
+ * of an ID.
+ *
+ * A dbitmap can grow or shrink as needed. These operations have been
+ * designed with the expectation that users may need to briefly release
+ * their locks in order to allocate memory for the new bitmap. Each
+ * operation is then re-verified to determine whether the grow or
+ * shrink is still valid.
+ *
+ * This library does not provide protection against concurrent access
+ * by itself. Binder uses the proc->outer_lock for this purpose.
+ */
+
+#ifndef _LINUX_DBITMAP_H
+#define _LINUX_DBITMAP_H
+#include <linux/bitmap.h>
+
+#define NBITS_MIN BITS_PER_TYPE(unsigned long)
+
+struct dbitmap {
+ unsigned int nbits;
+ unsigned long *map;
+};
+
+static inline int dbitmap_enabled(struct dbitmap *dmap)
+{
+ return !!dmap->nbits;
+}
+
+static inline void dbitmap_free(struct dbitmap *dmap)
+{
+ dmap->nbits = 0;
+ kfree(dmap->map);
+ dmap->map = NULL;
+}
+
+/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
+static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
+{
+ unsigned int bit;
+
+ if (dmap->nbits <= NBITS_MIN)
+ return 0;
+
+ /*
+ * Determine if the bitmap can shrink based on the position of
+ * its last set bit. If the bit is within the first quarter of
+ * the bitmap then shrinking is possible. In this case, the
+ * bitmap should shrink to half its current size.
+ */
+ bit = find_last_bit(dmap->map, dmap->nbits);
+ if (bit < (dmap->nbits >> 2))
+ return dmap->nbits >> 1;
+
+ /* find_last_bit() returns dmap->nbits when no bits are set. */
+ if (bit == dmap->nbits)
+ return NBITS_MIN;
+
+ return 0;
+}
+
+/* Replace the internal bitmap with a new one of different size */
+static inline void
+dbitmap_replace(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
+ kfree(dmap->map);
+ dmap->map = new;
+ dmap->nbits = nbits;
+}
+
+static inline void
+dbitmap_shrink(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ if (!new)
+ return;
+
+ /*
+ * Verify that shrinking to @nbits is still possible. The @new
+ * bitmap might have been allocated without locks, so this call
+ * could now be outdated. In this case, free @new and move on.
+ */
+ if (!dbitmap_enabled(dmap) || dbitmap_shrink_nbits(dmap) != nbits) {
+ kfree(new);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+/* Returns the nbits that a dbitmap can grow to. */
+static inline unsigned int dbitmap_grow_nbits(struct dbitmap *dmap)
+{
+ return dmap->nbits << 1;
+}
+
+static inline void
+dbitmap_grow(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ /*
+ * Verify that growing to @nbits is still possible. The @new
+ * bitmap might have been allocated without locks, so this call
+ * could now be outdated. In this case, free @new and move on.
+ */
+ if (!dbitmap_enabled(dmap) || nbits <= dmap->nbits) {
+ kfree(new);
+ return;
+ }
+
+ /*
+ * Check for ENOMEM after confirming the grow operation is still
+ * required. This ensures we only disable the dbitmap when it's
+ * necessary. Once the dbitmap is disabled, binder will fall back
+ * to slow_desc_lookup_olocked().
+ */
+ if (!new) {
+ dbitmap_free(dmap);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+/*
+ * Finds and sets the next zero bit in the bitmap. Upon success @bit
+ * is populated with the index and 0 is returned. Otherwise, -ENOSPC
+ * is returned to indicate that a dbitmap_grow() is needed.
+ */
+static inline int
+dbitmap_acquire_next_zero_bit(struct dbitmap *dmap, unsigned long offset,
+ unsigned long *bit)
+{
+ unsigned long n;
+
+ n = find_next_zero_bit(dmap->map, dmap->nbits, offset);
+ if (n == dmap->nbits)
+ return -ENOSPC;
+
+ *bit = n;
+ set_bit(n, dmap->map);
+
+ return 0;
+}
+
+static inline void
+dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
+{
+ clear_bit(bit, dmap->map);
+}
+
+static inline int dbitmap_init(struct dbitmap *dmap)
+{
+ dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
+ if (!dmap->map) {
+ dmap->nbits = 0;
+ return -ENOMEM;
+ }
+
+ dmap->nbits = NBITS_MIN;
+
+ return 0;
+}
+#endif
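Tying the helpers together: bitmap_zalloc(GFP_KERNEL) may sleep, so a caller allocates the resized map with its lock dropped and lets dbitmap_grow() re-verify under the lock. A hedged sketch (function and lock are hypothetical; binder itself falls back to a slow descriptor lookup when the dbitmap is disabled rather than failing):

/* Sketch: allocate the smallest free descriptor, growing on demand. */
static int example_acquire_desc(struct dbitmap *dmap, spinlock_t *lock,
				unsigned long *desc)
{
	unsigned int nbits;
	unsigned long *new;

	spin_lock(lock);
	while (dbitmap_acquire_next_zero_bit(dmap, 0, desc) == -ENOSPC) {
		nbits = dbitmap_grow_nbits(dmap);
		spin_unlock(lock);
		new = bitmap_zalloc(nbits, GFP_KERNEL);	/* may sleep */
		spin_lock(lock);
		dbitmap_grow(dmap, new, nbits);	/* re-verifies; may free @new */
		if (!dbitmap_enabled(dmap)) {	/* ENOMEM disabled the dbitmap */
			spin_unlock(lock);
			return -ENOMEM;
		}
	}
	spin_unlock(lock);

	return 0;
}

dbitmap_shrink() has the same allocate-outside, verify-inside shape; for example, a 256-bit map whose last set bit is below 64 (the first quarter) may shrink to 128 bits.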
diff --git a/drivers/android/tests/.kunitconfig b/drivers/android/tests/.kunitconfig
new file mode 100644
index 000000000000..39b76bab9d9a
--- /dev/null
+++ b/drivers/android/tests/.kunitconfig
@@ -0,0 +1,7 @@
+#
+# Copyright 2025 Google LLC.
+#
+
+CONFIG_KUNIT=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=y
diff --git a/drivers/android/tests/Makefile b/drivers/android/tests/Makefile
new file mode 100644
index 000000000000..27268418eb03
--- /dev/null
+++ b/drivers/android/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2025 Google LLC.
+#
+
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += binder_alloc_kunit.o
diff --git a/drivers/android/tests/binder_alloc_kunit.c b/drivers/android/tests/binder_alloc_kunit.c
new file mode 100644
index 000000000000..7f9cc003bbe3
--- /dev/null
+++ b/drivers/android/tests/binder_alloc_kunit.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for binder allocator code.
+ *
+ * Copyright 2025 Google LLC.
+ * Author: Tiffany Yang <ynaffit@google.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/anon_inodes.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/seq_buf.h>
+#include <linux/sizes.h>
+
+#include "../binder_alloc.h"
+#include "../binder_internal.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+#define BINDER_MMAP_SIZE SZ_128K
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)
+
+#define ALIGN_TYPE_STRLEN (12)
+
+#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)
+
+#define PRINT_ALL_CASES (0)
+
+/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
+#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ /**
+ * @LOOP_END: The number of enum values in &buf_end_align_type.
+ * It is used for controlling loop termination.
+ */
+ LOOP_END,
+};
+
+static const char *const buf_end_align_type_strs[LOOP_END] = {
+ [SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
+ [SAME_PAGE_ALIGNED] = " SP_ALIGNED ",
+ [NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
+ [NEXT_PAGE_ALIGNED] = " NP_ALIGNED ",
+ [NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
+};
+
+struct binder_alloc_test_case_info {
+ char alignments[ALIGNMENTS_BUFLEN];
+ struct seq_buf alignments_sb;
+ size_t *buffer_sizes;
+ int *free_sequence;
+ bool front_pages;
+};
+
+static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *sb)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ seq_buf_printf(sb, "[%d]", seq[i]);
+
+ KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static void stringify_alignments(struct kunit *test, int *alignments,
+ struct seq_buf *sb)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ seq_buf_printf(sb, "[ %d:%s ]", i,
+ buf_end_align_type_strs[alignments[i]]);
+
+ KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static bool check_buffer_pages_allocated(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ unsigned long page_addr;
+ unsigned long end;
+ int page_index;
+
+ end = PAGE_ALIGN(buffer->user_data + size);
+ page_addr = buffer->user_data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
+ kunit_err(test, "expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index] ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ unsigned long failures = 0;
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
+ failures++;
+ }
+
+ return failures;
+}
+
+static unsigned long binder_alloc_test_free_buf(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ unsigned long failures = 0;
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
+ kunit_err(test, "expect lru but is %s at page index %d\n",
+ alloc->pages[i] ? "alloc" : "free", i);
+ failures++;
+ }
+ }
+
+ return failures;
+}
+
+static unsigned long binder_alloc_test_free_page(struct kunit *test,
+ struct binder_alloc *alloc)
+{
+ unsigned long failures = 0;
+ unsigned long count;
+ int i;
+
+ while ((count = list_lru_count(alloc->freelist))) {
+ list_lru_walk(alloc->freelist, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i]) {
+ kunit_err(test, "expect free but is %s at page index %d\n",
+ list_empty(page_to_lru(alloc->pages[i])) ?
+ "alloc" : "lru", i);
+ failures++;
+ }
+ }
+
+ return failures;
+}
+
+/* Executes one full test run for the given test case. */
+static bool binder_alloc_test_alloc_free(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ size_t end)
+{
+ unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
+ struct binder_buffer *buffers[BUFFER_NUM];
+ unsigned long failures;
+ bool failed = false;
+
+ failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Initial allocation failed: %lu/%u buffers with errors",
+ failures, BUFFER_NUM);
+
+ failures = binder_alloc_test_free_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence, end);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
+ failures, pages);
+
+ /* Allocate from lru. */
+ failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Reallocation failed: %lu/%u buffers with errors",
+ failures, BUFFER_NUM);
+
+ failures = list_lru_count(alloc->freelist);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "lru list should be empty after reallocation but still has %lu pages",
+ failures);
+
+ failures = binder_alloc_test_free_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence, end);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
+ failures, pages);
+
+ failures = binder_alloc_test_free_page(test, alloc);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Failed to clean up allocated pages: %lu/%lu pages still installed",
+ failures, (alloc->buffer_size / PAGE_SIZE));
+
+ return failed;
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ unsigned long *runs, unsigned long *failures,
+ int index, size_t end)
+{
+ bool case_failed;
+ int i;
+
+ if (index == BUFFER_NUM) {
+ DECLARE_SEQ_BUF(freeseq_sb, FREESEQ_BUFLEN);
+
+ case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
+ *runs += 1;
+ *failures += case_failed;
+
+ if (case_failed || PRINT_ALL_CASES) {
+ stringify_free_seq(test, tc->free_sequence,
+ &freeseq_sb);
+ kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
+ case_failed ? "FAILED" : "PASSED",
+ tc->front_pages ? "front" : "back ",
+ seq_buf_str(&tc->alignments_sb),
+ seq_buf_str(&freeseq_sb));
+ }
+
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(tc->free_sequence, index, i))
+ continue;
+ tc->free_sequence[index] = i;
+ permute_frees(test, alloc, tc, runs, failures, index + 1, end);
+ }
+}
+
+static void gen_buf_sizes(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ size_t *end_offset, unsigned long *runs,
+ unsigned long *failures)
+{
+ size_t last_offset, offset = 0;
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ int seq[BUFFER_NUM] = {0};
+ int i;
+
+ tc->free_sequence = seq;
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ tc->front_pages = true;
+ tc->buffer_sizes = front_sizes;
+ permute_frees(test, alloc, tc, runs, failures, 0,
+ end_offset[BUFFER_NUM - 1]);
+
+ tc->front_pages = false;
+ tc->buffer_sizes = back_sizes;
+ permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
+}
+
+static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
+ size_t *end_offset, int *alignments,
+ unsigned long *runs, unsigned long *failures,
+ int index)
+{
+ size_t end, prev;
+ int align;
+
+ if (index == BUFFER_NUM) {
+ struct binder_alloc_test_case_info tc = {0};
+
+ seq_buf_init(&tc.alignments_sb, tc.alignments,
+ ALIGNMENTS_BUFLEN);
+ stringify_alignments(test, alignments, &tc.alignments_sb);
+
+ gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ alignments[index] = align;
+ gen_buf_offsets(test, alloc, end_offset, alignments, runs,
+ failures, index + 1);
+ }
+}
+
+struct binder_alloc_test {
+ struct binder_alloc alloc;
+ struct list_lru binder_test_freelist;
+ struct file *filp;
+ unsigned long mmap_uaddr;
+};
+
+static void binder_alloc_test_init_freelist(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+
+ KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
+ &priv->binder_test_freelist);
+}
+
+static void binder_alloc_test_mmap(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+ struct binder_alloc *alloc = &priv->alloc;
+ struct binder_buffer *buf;
+ struct rb_node *n;
+
+ KUNIT_EXPECT_EQ(test, alloc->mapped, true);
+ KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);
+
+ n = rb_first(&alloc->allocated_buffers);
+ KUNIT_EXPECT_PTR_EQ(test, n, NULL);
+
+ n = rb_first(&alloc->free_buffers);
+ buf = rb_entry(n, struct binder_buffer, rb_node);
+ KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
+ BINDER_MMAP_SIZE);
+ KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
+}
+
+/**
+ * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
+ * @test: The test context object.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page() is called.
+ */
+static void binder_alloc_exhaustive_test(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+ size_t end_offset[BUFFER_NUM];
+ int alignments[BUFFER_NUM];
+ unsigned long failures = 0;
+ unsigned long runs = 0;
+
+ gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
+ &failures, 0);
+
+ KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
+ KUNIT_EXPECT_EQ(test, failures, 0);
+}
+
+/* ===== End test cases ===== */
+
+static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_alloc *alloc = vma->vm_private_data;
+
+ binder_alloc_vma_close(alloc);
+}
+
+static const struct vm_operations_struct binder_alloc_test_vm_ops = {
+ .close = binder_alloc_test_vma_close,
+ .fault = binder_vm_fault,
+};
+
+static int binder_alloc_test_mmap_handler(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct binder_alloc *alloc = filp->private_data;
+
+ vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
+
+ vma->vm_ops = &binder_alloc_test_vm_ops;
+ vma->vm_private_data = alloc;
+
+ return binder_alloc_mmap_handler(alloc, vma);
+}
+
+static const struct file_operations binder_alloc_test_fops = {
+ .mmap = binder_alloc_test_mmap_handler,
+};
+
+static int binder_alloc_test_init(struct kunit *test)
+{
+ struct binder_alloc_test *priv;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ret = list_lru_init(&priv->binder_test_freelist);
+ if (ret) {
+ kunit_err(test, "Failed to initialize test freelist\n");
+ return ret;
+ }
+
+ /* __binder_alloc_init requires mm to be attached */
+ ret = kunit_attach_mm();
+ if (ret) {
+ kunit_err(test, "Failed to attach mm\n");
+ return ret;
+ }
+ __binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);
+
+ priv->filp = anon_inode_getfile("binder_alloc_kunit",
+ &binder_alloc_test_fops, &priv->alloc,
+ O_RDWR | O_CLOEXEC);
+ if (IS_ERR_OR_NULL(priv->filp)) {
+ kunit_err(test, "Failed to open binder alloc test driver file\n");
+ return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
+ }
+
+ priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
+ PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
+ 0);
+ if (!priv->mmap_uaddr) {
+ kunit_err(test, "Could not map the test's transaction memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void binder_alloc_test_exit(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+
+ /* Close the backing file to make sure binder_alloc_vma_close runs */
+ if (!IS_ERR_OR_NULL(priv->filp))
+ fput(priv->filp);
+
+ if (priv->alloc.mm)
+ binder_alloc_deferred_release(&priv->alloc);
+
+ /* Make sure freelist is empty */
+ KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
+ list_lru_destroy(&priv->binder_test_freelist);
+}
+
+static struct kunit_case binder_alloc_test_cases[] = {
+ KUNIT_CASE(binder_alloc_test_init_freelist),
+ KUNIT_CASE(binder_alloc_test_mmap),
+ KUNIT_CASE_SLOW(binder_alloc_exhaustive_test),
+ {}
+};
+
+static struct kunit_suite binder_alloc_test_suite = {
+ .name = "binder_alloc",
+ .test_cases = binder_alloc_test_cases,
+ .init = binder_alloc_test_init,
+ .exit = binder_alloc_test_exit,
+};
+
+kunit_test_suite(binder_alloc_test_suite);
+
+MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
+MODULE_DESCRIPTION("Binder Alloc KUnit tests");
+MODULE_LICENSE("GPL");
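
The module above follows the stock KUnit pattern: suite-wide setup in .init, teardown in .exit, cases listed in a kunit_case array and registered with kunit_test_suite(). As a reference, a minimal self-contained sketch of that skeleton (all "demo" names are illustrative, not binder code):

    #include <kunit/test.h>

    struct demo_test_priv {
            int counter;
    };

    static int demo_test_init(struct kunit *test)
    {
            /* Test-managed allocation: freed automatically after the case. */
            struct demo_test_priv *priv = kunit_kzalloc(test, sizeof(*priv),
                                                        GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;
            test->priv = priv;
            return 0;
    }

    static void demo_counter_test(struct kunit *test)
    {
            struct demo_test_priv *priv = test->priv;

            priv->counter++;
            KUNIT_EXPECT_EQ(test, priv->counter, 1);
    }

    static struct kunit_case demo_test_cases[] = {
            KUNIT_CASE(demo_counter_test),
            {}
    };

    static struct kunit_suite demo_test_suite = {
            .name = "demo",
            .test_cases = demo_test_cases,
            .init = demo_test_init,
    };

    kunit_test_suite(demo_test_suite);
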
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b595494ab9b4..120a2b7067fc 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -117,23 +117,39 @@ config SATA_AHCI
config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy"
- range 0 4
- default 0
+ range 0 5
+ default 3
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
chipsets are ubiquitous across laptops, desktops and servers.
-
- The value set has the following meanings:
+ Each policy combines power saving states and features:
+ - Partial: The Phy logic is powered but is in a reduced power
+ state. The exit latency from this state is no longer than
+ 10us.
+ - Slumber: The Phy logic is powered but is in an even lower power
+ state. The exit latency from this state is potentially
+ longer, but no longer than 10ms.
+ - DevSleep: The Phy logic may be powered down. The exit latency from
+ this state is no longer than 20 ms, unless otherwise
+ specified by DETO in the device Identify Device Data log.
+ - HIPM: Host Initiated Power Management (host automatically
+ transitions to partial and slumber).
+ - DIPM: Device Initiated Power Management (device automatically
+ transitions to partial and slumber).
+
+ The possible values for the default SATA link power management
+ policies are:
0 => Keep firmware settings
- 1 => Maximum performance
- 2 => Medium power
- 3 => Medium power with Device Initiated PM enabled
- 4 => Minimum power
-
- Note "Minimum power" is known to cause issues, including disk
- corruption, with some disks and should not be used.
+ 1 => No power savings (maximum performance)
+ 2 => HIPM (Partial)
+ 3 => HIPM (Partial) and DIPM (Partial and Slumber)
+ 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber)
+ 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber)
+
+ Excluding the value 0, higher values represent policies with higher
+ power savings.
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
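
For orientation, the six values line up with libata's enum ata_lpm_policy and with the names accepted by the link_power_management_policy sysfs attribute; a sketch of that correspondence, assuming the name table libata uses (see libata-sata.c):

    #include <linux/libata.h>

    /* Kconfig value / enum ata_lpm_policy / sysfs policy name */
    static const char * const demo_lpm_policy_names[] = {
            [ATA_LPM_UNKNOWN]                = "keep_firmware_settings",  /* 0 */
            [ATA_LPM_MAX_POWER]              = "max_performance",         /* 1 */
            [ATA_LPM_MED_POWER]              = "medium_power",            /* 2 */
            [ATA_LPM_MED_POWER_WITH_DIPM]    = "med_power_with_dipm",     /* 3 */
            [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",  /* 4 */
            [ATA_LPM_MIN_POWER]              = "min_power",               /* 5 */
    };

At runtime the same policies can be selected per host by writing one of these names to /sys/class/scsi_host/hostN/link_power_management_policy.
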
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 547f56341705..3999305b5356 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -370,7 +370,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_request_all_regions(pdev, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -386,7 +386,9 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap(pdev, AHCI_PCI_BAR, 0);
+ if (!hpriv->mmio)
+ return -ENOMEM;
/* save initial config */
ahci_save_initial_config(&pdev->dev, hpriv);
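
The acard-ahci.c hunks above swap the removed pcim_iomap_regions_request_all()/pcim_iomap_table() pair for the current devres helpers, which split requesting and mapping and release both automatically on unbind. A minimal probe-time sketch of the new pattern (driver name and BAR number are illustrative):

    #include <linux/pci.h>

    static int demo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            void __iomem *mmio;
            int rc;

            rc = pcim_enable_device(pdev);
            if (rc)
                    return rc;

            /* Request every BAR up front; all released automatically on unbind. */
            rc = pcim_request_all_regions(pdev, "demo");
            if (rc)
                    return rc;

            /* Map one BAR; a max length of 0 maps the whole BAR. */
            mmio = pcim_iomap(pdev, 5, 0);
            if (!mmio)
                    return -ENOMEM;

            return 0;
    }
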
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6548f10e61d9..7a7f88b3fa2b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -63,6 +63,7 @@ enum board_ids {
board_ahci_pcs_quirk_no_devslp,
board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
+ board_ahci_yes_fbs_atapi_dma,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_al,
@@ -109,17 +110,17 @@ static const struct scsi_host_template ahci_sht = {
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_vt8251_hardreset,
+ .reset.hardreset = ahci_vt8251_hardreset,
};
static struct ata_port_operations ahci_p5wdh_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_p5wdh_hardreset,
+ .reset.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_avn_hardreset,
+ .reset.hardreset = ahci_avn_hardreset,
};
static const struct ata_port_info ahci_port_info[] = {
@@ -188,6 +189,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs_atapi_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS |
+ AHCI_HFLAG_ATAPI_DMA_QUIRK),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_al] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
@@ -429,7 +438,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -590,6 +598,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
+ .driver_data = board_ahci_yes_fbs_atapi_dma },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
@@ -664,7 +674,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
-MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+MODULE_PARM_DESC(mobile_lpm_policy,
+ "Default LPM policy. Despite its name, this parameter applies "
+ "to all chipsets, including desktop and server chipsets");
static char *ahci_mask_port_map;
module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
@@ -677,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
-static void ahci_apply_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv, char *mask_s)
+static char *ahci_mask_port_ext;
+module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
+MODULE_PARM_DESC(mask_port_ext,
+ "32-bits mask to ignore the external/hotplug capability of ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
- return;
+ return 0;
}
- hpriv->mask_port_map = mask;
+ return mask;
}
-static void ahci_get_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv)
+static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
+ u32 mask = 0;
- if (!strlen(ahci_mask_port_map))
- return;
+ if (!mask_p || !strlen(mask_p))
+ return 0;
- str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
- return;
+ return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
- ahci_apply_port_map_mask(dev, hpriv, str);
+ mask = ahci_port_mask(dev, str);
goto free;
}
/*
- * Mask list case: parse the parameter to apply the mask only if
+ * Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -740,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
- ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
+
+ return mask;
}
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -770,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
- if (ahci_mask_port_map)
- ahci_get_port_map_mask(&pdev->dev, hpriv);
+ hpriv->mask_port_map =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
+ hpriv->mask_port_ext =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1371,7 +1397,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
* V1.03 is known to be broken. V3.04 is known to
* work. Between, there are V1.06, V2.06 and V3.03
* that we don't have much idea about. For now,
- * blacklist anything older than V3.04.
+ * assume that anything older than V3.04 is broken.
*
* http://bugzilla.kernel.org/show_bug.cgi?id=15104
*/
@@ -1400,8 +1426,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
static bool ahci_broken_lpm(struct pci_dev *pdev)
{
+ /*
+ * Platforms with LPM problems.
+ * If driver_data is NULL, there is no existing BIOS version with
+ * functioning LPM.
+ * If driver_data is non-NULL, then driver_data contains the DMI BIOS
+ * build date of the first BIOS version with functioning LPM (i.e. older
+ * BIOS versions have broken LPM).
+ */
static const struct dmi_system_id sysids[] = {
- /* Various Lenovo 50 series have LPM issues with older BIOSen */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1428,13 +1461,30 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
},
+ .driver_data = "20180409", /* 2.35 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
+ },
+ /* 320 is broken, there is no known good version. */
+ },
+ {
/*
- * Note date based on release notes, 2.35 has been
- * reported to be good, but I've been unable to get
- * a hold of the reporter to get the DMI BIOS date.
- * TODO: fix this.
+ * AMD 500 Series Chipset SATA Controller [1022:43eb]
+ * on this motherboard timeouts on ports 5 and 6 when
+ * LPM is enabled, at least with WDC WD20EFAX-68FB5N0
+ * hard drives. LPM with the same drive works fine on
+ * all other ports on the same controller.
*/
- .driver_data = "20180310", /* 2.35 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME,
+ "ROG STRIX B550-F GAMING (WI-FI)"),
+ },
+ /* 3621 is broken, there is no known good version. */
},
{ } /* terminate list */
};
@@ -1445,6 +1495,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
if (!dmi)
return false;
+ if (!dmi->driver_data)
+ return true;
+
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
@@ -1666,18 +1719,20 @@ static int ahci_get_irq_vector(struct ata_host *host, int port)
return pci_irq_vector(to_pci_dev(host->dev), port);
}
-static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
+static void ahci_init_irq(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int nvec;
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
+ if (hpriv->flags & AHCI_HFLAG_NO_MSI) {
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
+ return;
+ }
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
- * of multipe MSIs is negated and use single MSI mode instead.
+ * of multiple MSIs is negated and use single MSI mode instead.
*/
if (n_ports > 1) {
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
@@ -1686,7 +1741,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->get_irq_vector = ahci_get_irq_vector;
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
- return nvec;
+ return;
}
/*
@@ -1701,12 +1756,13 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
/*
* If the host is not capable of supporting per-port vectors, fall
- * back to single MSI before finally attempting single MSI-X.
+ * back to single MSI before finally attempting single MSI-X or
+ * a legacy INTx.
*/
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec == 1)
- return nvec;
- return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ return;
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_INTX);
}
static void ahci_mark_external_port(struct ata_port *ap)
@@ -1715,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- /* mark external ports (hotplug-capable, eSATA) */
+ /*
+ * Mark external ports (hotplug-capable, eSATA), unless we were asked to
+ * ignore this feature.
+ */
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
- (tmp & PORT_CMD_HPCP))
+ (tmp & PORT_CMD_HPCP)) {
+ if (hpriv->mask_port_ext & (1U << ap->port_no)) {
+ ata_port_info(ap,
+ "Ignoring external/hotplug capability\n");
+ return;
+ }
ap->pflags |= ATA_PFLAG_EXTERNAL;
+ }
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
@@ -1733,8 +1798,29 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
* Management Interaction in AHCI 1.3.1. Therefore, do not enable
* LPM if the port advertises itself as an external port.
*/
- if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ if (ap->pflags & ATA_PFLAG_EXTERNAL) {
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ return;
+ }
+
+ /* If no Partial or no Slumber, we cannot support DIPM. */
+ if ((ap->host->flags & ATA_HOST_NO_PART) ||
+ (ap->host->flags & ATA_HOST_NO_SSC)) {
+ ata_port_dbg(ap, "Host does not support DIPM\n");
+ ap->flags |= ATA_FLAG_NO_DIPM;
+ }
+
+ /* If no LPM states are supported by the HBA, do not bother with LPM */
+ if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ (ap->host->flags & ATA_HOST_NO_SSC) &&
+ (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+ ata_port_dbg(ap,
+ "No LPM states supported, forcing LPM max_power\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
+ }
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
@@ -1860,7 +1946,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ rc = pcim_request_all_regions(pdev, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1884,7 +1970,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
- hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ hpriv->mmio = pcim_iomap(pdev, ahci_pci_bar, 0);
+ if (!hpriv->mmio)
+ return -ENOMEM;
/* detect remapped nvme devices */
ahci_remap_check(pdev, ahci_pci_bar, hpriv);
@@ -1968,14 +2056,14 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
- if (!host)
- return -ENOMEM;
+ if (!host) {
+ rc = -ENOMEM;
+ goto err_rm_sysfs_file;
+ }
host->private_data = hpriv;
- if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
- /* legacy intx interrupts */
- pci_intx(pdev, 1);
- }
+ ahci_init_irq(pdev, n_ports, hpriv);
+
hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -2024,11 +2112,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
rc = ahci_pci_reset_controller(host);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
@@ -2037,10 +2125,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
pm_runtime_put_noidle(&pdev->dev);
return 0;
+
+err_rm_sysfs_file:
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr, NULL);
+ return rc;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
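
ahci_init_one() now creates the remapped_nvme sysfs file before several steps that can fail, so the early returns above become gotos that remove the file on every failure path. A compressed, self-contained sketch of that unwind shape (all "demo" names are stand-ins):

    struct demo_dev { int id; };

    static int demo_create_sysfs_file(struct demo_dev *d) { return 0; }
    static void demo_remove_sysfs_file(struct demo_dev *d) { }
    static int demo_setup_dma(struct demo_dev *d) { return 0; }
    static int demo_activate(struct demo_dev *d) { return 0; }

    static int demo_probe(struct demo_dev *ddev)
    {
            int rc;

            rc = demo_create_sysfs_file(ddev);      /* resource taken early */
            if (rc)
                    return rc;

            /* Later failures must undo the sysfs file, so no bare returns. */
            rc = demo_setup_dma(ddev);
            if (rc)
                    goto err_rm_sysfs_file;

            rc = demo_activate(ddev);
            if (rc)
                    goto err_rm_sysfs_file;

            return 0;

    err_rm_sysfs_file:
            demo_remove_sysfs_file(ddev);
            return rc;
    }
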
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8f40f75ba08c..293b7fb216b5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -246,6 +246,7 @@ enum {
AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
+ AHCI_HFLAG_ATAPI_DMA_QUIRK = BIT(29), /* force ATAPI to use DMA */
/* ap->flags bits */
@@ -328,7 +329,8 @@ struct ahci_port_priv {
struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
- u32 mask_port_map; /* mask out particular bits */
+ u32 mask_port_map; /* Mask of valid ports */
+ u32 mask_port_ext; /* Mask of ports ext capability */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
@@ -379,6 +381,21 @@ struct ahci_host_priv {
int port);
};
+/*
+ * Return true if a port should be ignored because it is excluded from
+ * the host port map.
+ */
+static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
+ unsigned int portid)
+{
+ if (portid >= hpriv->nports)
+ return true;
+ /* mask_port_map not set means that all ports are available */
+ if (!hpriv->mask_port_map)
+ return false;
+ return !(hpriv->mask_port_map & (1 << portid));
+}
+
extern int ahci_ignore_sss;
extern const struct attribute_group *ahci_shost_groups[];
@@ -396,8 +413,8 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
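
A quick worked example of the helper's semantics (hypothetical values): with nports = 4 and mask_port_map = 0x5, ports 0 and 2 are kept, ports 1 and 3 are masked out, and anything >= 4 is ignored; a zero mask keeps every port. The userspace stand-in below replicates the logic so it can be run directly:

    #include <stdio.h>

    /* Userspace stand-in for ahci_ignore_port(), same logic as above. */
    static int ignore_port(unsigned int nports, unsigned int mask,
                           unsigned int portid)
    {
            if (portid >= nports)
                    return 1;
            if (!mask)                      /* zero mask: all ports available */
                    return 0;
            return !(mask & (1U << portid));
    }

    int main(void)
    {
            unsigned int i;

            /* nports = 4, mask_port_map = 0x5: only ports 0 and 2 survive. */
            for (i = 0; i < 6; i++)
                    printf("port %u: %s\n", i,
                           ignore_port(4, 0x5, i) ? "ignored" : "kept");
            return 0;
    }
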
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 70c3a33eee6f..29be74fedcf0 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -288,6 +288,9 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Re-initialize and calibrate the PHY */
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -437,7 +440,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -451,8 +453,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->version = (unsigned long)of_id->data;
priv->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
- priv->top_ctrl = devm_ioremap_resource(dev, res);
+ priv->top_ctrl = devm_platform_ioremap_resource_byname(pdev, "top-ctrl");
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
@@ -573,7 +574,7 @@ static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
- .remove_new = brcm_ahci_remove,
+ .remove = brcm_ahci_remove,
.shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 11a2c199a7c2..2d6a08c23d6a 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -206,6 +206,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
goto disable_clks;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_rsts;
@@ -215,6 +218,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
ahci_platform_deassert_rsts(hpriv);
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
@@ -402,7 +408,7 @@ MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
static struct platform_driver ceva_ahci_driver = {
.probe = ceva_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ceva_ahci_of_match,
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 55a6627d5450..f97566c420f8 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -137,13 +137,13 @@ static int ahci_da850_hardreset(struct ata_link *link,
static struct ata_port_operations ahci_da850_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_da850_softreset,
+ .reset.softreset = ahci_da850_softreset,
/*
* No need to override .pmp_softreset - it's only used for actual
* PMP-enabled ports.
*/
- .hardreset = ahci_da850_hardreset,
- .pmp_hardreset = ahci_da850_hardreset,
+ .reset.hardreset = ahci_da850_hardreset,
+ .pmp_reset.hardreset = ahci_da850_hardreset,
};
static const struct ata_port_info ahci_da850_port_info = {
@@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
static struct platform_driver ahci_da850_driver = {
.probe = ahci_da850_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_da850_of_match,
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 4cb70064fb99..93faed2cfeb6 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -124,7 +124,7 @@ static int ahci_dm816_softreset(struct ata_link *link,
static struct ata_port_operations ahci_dm816_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_dm816_softreset,
+ .reset.softreset = ahci_dm816_softreset,
};
static const struct ata_port_info ahci_dm816_port_info = {
@@ -182,7 +182,7 @@ MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
static struct platform_driver ahci_dm816_driver = {
.probe = ahci_dm816_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = AHCI_DM816_DRV_NAME,
.of_match_table = ahci_dm816_of_match,
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index ed263de3fd70..aec6d793f51a 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -478,7 +478,7 @@ MODULE_DEVICE_TABLE(of, ahci_dwc_of_match);
static struct platform_driver ahci_dwc_driver = {
.probe = ahci_dwc_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index cb768f66f0a7..86aedd5923ac 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -19,6 +19,7 @@
#include <linux/libata.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/phy/phy.h>
#include <linux/thermal.h>
#include "ahci.h"
@@ -44,42 +45,10 @@ enum {
/* Clock Reset Register */
IMX_CLOCK_RESET = 0x7f3f,
IMX_CLOCK_RESET_RESET = 1 << 0,
- /* IMX8QM HSIO AHCI definitions */
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET = 0x03,
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET = 0x09,
- IMX8QM_SATA_PHY_IMPED_RATIO_85OHM = 0x6c,
- IMX8QM_LPCG_PHYX2_OFFSET = 0x00000,
- IMX8QM_CSR_PHYX2_OFFSET = 0x90000,
- IMX8QM_CSR_PHYX1_OFFSET = 0xa0000,
- IMX8QM_CSR_PHYX_STTS0_OFFSET = 0x4,
- IMX8QM_CSR_PCIEA_OFFSET = 0xb0000,
- IMX8QM_CSR_PCIEB_OFFSET = 0xc0000,
- IMX8QM_CSR_SATA_OFFSET = 0xd0000,
- IMX8QM_CSR_PCIE_CTRL2_OFFSET = 0x8,
- IMX8QM_CSR_MISC_OFFSET = 0xe0000,
-
- IMX8QM_LPCG_PHYX2_PCLK0_MASK = (0x3 << 16),
- IMX8QM_LPCG_PHYX2_PCLK1_MASK = (0x3 << 20),
- IMX8QM_PHY_APB_RSTN_0 = BIT(0),
- IMX8QM_PHY_MODE_SATA = BIT(19),
- IMX8QM_PHY_MODE_MASK = (0xf << 17),
- IMX8QM_PHY_PIPE_RSTN_0 = BIT(24),
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0 = BIT(25),
- IMX8QM_PHY_PIPE_RSTN_1 = BIT(26),
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1 = BIT(27),
- IMX8QM_STTS0_LANE0_TX_PLL_LOCK = BIT(4),
- IMX8QM_MISC_IOB_RXENA = BIT(0),
- IMX8QM_MISC_IOB_TXENA = BIT(1),
- IMX8QM_MISC_PHYX1_EPCS_SEL = BIT(12),
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 = BIT(24),
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 = BIT(25),
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 = BIT(28),
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0 = BIT(29),
- IMX8QM_SATA_CTRL_RESET_N = BIT(12),
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N = BIT(7),
- IMX8QM_CTRL_BUTTON_RST_N = BIT(21),
- IMX8QM_CTRL_POWER_UP_RST_N = BIT(23),
- IMX8QM_CTRL_LTSSM_ENABLE = BIT(4),
+ /* IMX8QM SATA specific control registers */
+ IMX8QM_SATA_AHCI_PTC = 0xc8,
+ IMX8QM_SATA_AHCI_PTC_RXWM_MASK = GENMASK(6, 0),
+ IMX8QM_SATA_AHCI_PTC_RXWM = 0x29,
};
enum ahci_imx_type {
@@ -95,14 +64,10 @@ struct imx_ahci_priv {
struct clk *sata_clk;
struct clk *sata_ref_clk;
struct clk *ahb_clk;
- struct clk *epcs_tx_clk;
- struct clk *epcs_rx_clk;
- struct clk *phy_apbclk;
- struct clk *phy_pclk0;
- struct clk *phy_pclk1;
- void __iomem *phy_base;
- struct gpio_desc *clkreq_gpiod;
struct regmap *gpr;
+ struct phy *sata_phy;
+ struct phy *cali_phy0;
+ struct phy *cali_phy1;
bool no_device;
bool first_time;
u32 phy_params;
@@ -450,201 +415,79 @@ ATTRIBUTE_GROUPS(fsl_sata_ahci);
static int imx8_sata_enable(struct ahci_host_priv *hpriv)
{
- u32 val, reg;
- int i, ret;
+ u32 val;
+ int ret;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
struct device *dev = &imxpriv->ahci_pdev->dev;
- /* configure the hsio for sata */
- ret = clk_prepare_enable(imxpriv->phy_pclk0);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_pclk0.\n");
+ /*
+ * Since the "REXT" pin is only present on the first lane of the
+ * i.MX8QM PHY, its calibration results are stored, passed through
+ * to the second lane PHY, and shared with all three lane PHYs.
+ *
+ * Initialize the first two lane PHYs here, although only the
+ * third lane PHY is used by SATA.
+ */
+ ret = phy_init(imxpriv->cali_phy0);
+ if (ret) {
+ dev_err(dev, "cali PHY init failed\n");
return ret;
}
- ret = clk_prepare_enable(imxpriv->phy_pclk1);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_pclk1.\n");
- goto disable_phy_pclk0;
+ ret = phy_power_on(imxpriv->cali_phy0);
+ if (ret) {
+ dev_err(dev, "cali PHY power on failed\n");
+ goto err_cali_phy0_exit;
}
- ret = clk_prepare_enable(imxpriv->epcs_tx_clk);
- if (ret < 0) {
- dev_err(dev, "can't enable epcs_tx_clk.\n");
- goto disable_phy_pclk1;
+ ret = phy_init(imxpriv->cali_phy1);
+ if (ret) {
+ dev_err(dev, "cali PHY1 init failed\n");
+ goto err_cali_phy0_off;
}
- ret = clk_prepare_enable(imxpriv->epcs_rx_clk);
- if (ret < 0) {
- dev_err(dev, "can't enable epcs_rx_clk.\n");
- goto disable_epcs_tx_clk;
+ ret = phy_power_on(imxpriv->cali_phy1);
+ if (ret) {
+ dev_err(dev, "cali PHY1 power on failed\n");
+ goto err_cali_phy1_exit;
}
- ret = clk_prepare_enable(imxpriv->phy_apbclk);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_apbclk.\n");
- goto disable_epcs_rx_clk;
+ ret = phy_init(imxpriv->sata_phy);
+ if (ret) {
+ dev_err(dev, "sata PHY init failed\n");
+ goto err_cali_phy1_off;
}
- /* Configure PHYx2 PIPE_RSTN */
- regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEA_OFFSET +
- IMX8QM_CSR_PCIE_CTRL2_OFFSET, &val);
- if ((val & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The link of the PCIEA of HSIO is down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX2_OFFSET,
- IMX8QM_PHY_PIPE_RSTN_0 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0,
- IMX8QM_PHY_PIPE_RSTN_0 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0);
+ ret = phy_set_mode(imxpriv->sata_phy, PHY_MODE_SATA);
+ if (ret) {
+ dev_err(dev, "unable to set SATA PHY mode\n");
+ goto err_sata_phy_exit;
}
- regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEB_OFFSET +
- IMX8QM_CSR_PCIE_CTRL2_OFFSET, &reg);
- if ((reg & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The link of the PCIEB of HSIO is down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX2_OFFSET,
- IMX8QM_PHY_PIPE_RSTN_1 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1,
- IMX8QM_PHY_PIPE_RSTN_1 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1);
+ ret = phy_power_on(imxpriv->sata_phy);
+ if (ret) {
+ dev_err(dev, "sata PHY power up failed\n");
+ goto err_sata_phy_exit;
}
- if (((reg | val) & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The links of both PCIA and PCIEB of HSIO are down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_LPCG_PHYX2_OFFSET,
- IMX8QM_LPCG_PHYX2_PCLK0_MASK |
- IMX8QM_LPCG_PHYX2_PCLK1_MASK,
- 0);
- }
-
- /* set PWR_RST and BT_RST of csr_pciea */
- val = IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET;
- regmap_update_bits(imxpriv->gpr,
- val,
- IMX8QM_CTRL_BUTTON_RST_N,
- IMX8QM_CTRL_BUTTON_RST_N);
- regmap_update_bits(imxpriv->gpr,
- val,
- IMX8QM_CTRL_POWER_UP_RST_N,
- IMX8QM_CTRL_POWER_UP_RST_N);
-
- /* PHYX1_MODE to SATA */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX1_OFFSET,
- IMX8QM_PHY_MODE_MASK,
- IMX8QM_PHY_MODE_SATA);
- /*
- * BIT0 RXENA 1, BIT1 TXENA 0
- * BIT12 PHY_X1_EPCS_SEL 1.
- */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_IOB_RXENA,
- IMX8QM_MISC_IOB_RXENA);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_IOB_TXENA,
- 0);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_PHYX1_EPCS_SEL,
- IMX8QM_MISC_PHYX1_EPCS_SEL);
- /*
- * It is possible, for PCIe and SATA are sharing
- * the same clock source, HPLL or external oscillator.
- * When PCIe is in low power modes (L1.X or L2 etc),
- * the clock source can be turned off. In this case,
- * if this clock source is required to be toggling by
- * SATA, then SATA functions will be abnormal.
- * Set the override here to avoid it.
- */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0,
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0);
-
- /* clear PHY RST, then set it */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
- 0);
-
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N);
-
- /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- IMX8QM_SATA_CTRL_RESET_N);
- udelay(1);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- 0);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- IMX8QM_SATA_CTRL_RESET_N);
-
- /* APB reset */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX1_OFFSET,
- IMX8QM_PHY_APB_RSTN_0,
- IMX8QM_PHY_APB_RSTN_0);
-
- for (i = 0; i < 100; i++) {
- reg = IMX8QM_CSR_PHYX1_OFFSET +
- IMX8QM_CSR_PHYX_STTS0_OFFSET;
- regmap_read(imxpriv->gpr, reg, &val);
- val &= IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
- if (val == IMX8QM_STTS0_LANE0_TX_PLL_LOCK)
- break;
- udelay(1);
- }
+ /* The cali_phy# can be turned off after SATA PHY is initialized. */
+ phy_power_off(imxpriv->cali_phy1);
+ phy_exit(imxpriv->cali_phy1);
+ phy_power_off(imxpriv->cali_phy0);
+ phy_exit(imxpriv->cali_phy0);
- if (val != IMX8QM_STTS0_LANE0_TX_PLL_LOCK) {
- dev_err(dev, "TX PLL of the PHY is not locked\n");
- ret = -ENODEV;
- } else {
- writeb(imxpriv->imped_ratio, imxpriv->phy_base +
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
- writeb(imxpriv->imped_ratio, imxpriv->phy_base +
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
- reg = readb(imxpriv->phy_base +
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
- if (unlikely(reg != imxpriv->imped_ratio))
- dev_info(dev, "Can't set PHY RX impedance ratio.\n");
- reg = readb(imxpriv->phy_base +
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
- if (unlikely(reg != imxpriv->imped_ratio))
- dev_info(dev, "Can't set PHY TX impedance ratio.\n");
- usleep_range(50, 100);
+ /* RxWaterMark setting */
+ val = readl(hpriv->mmio + IMX8QM_SATA_AHCI_PTC);
+ val &= ~IMX8QM_SATA_AHCI_PTC_RXWM_MASK;
+ val |= IMX8QM_SATA_AHCI_PTC_RXWM;
+ writel(val, hpriv->mmio + IMX8QM_SATA_AHCI_PTC);
- /*
- * To reduce the power consumption, gate off
- * the PHY clks
- */
- clk_disable_unprepare(imxpriv->phy_apbclk);
- clk_disable_unprepare(imxpriv->phy_pclk1);
- clk_disable_unprepare(imxpriv->phy_pclk0);
- return ret;
- }
+ return 0;
- clk_disable_unprepare(imxpriv->phy_apbclk);
-disable_epcs_rx_clk:
- clk_disable_unprepare(imxpriv->epcs_rx_clk);
-disable_epcs_tx_clk:
- clk_disable_unprepare(imxpriv->epcs_tx_clk);
-disable_phy_pclk1:
- clk_disable_unprepare(imxpriv->phy_pclk1);
-disable_phy_pclk0:
- clk_disable_unprepare(imxpriv->phy_pclk0);
+err_sata_phy_exit:
+ phy_exit(imxpriv->sata_phy);
+err_cali_phy1_off:
+ phy_power_off(imxpriv->cali_phy1);
+err_cali_phy1_exit:
+ phy_exit(imxpriv->cali_phy1);
+err_cali_phy0_off:
+ phy_power_off(imxpriv->cali_phy0);
+err_cali_phy0_exit:
+ phy_exit(imxpriv->cali_phy0);
return ret;
}
@@ -668,7 +511,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
/*
- * set PHY Paremeters, two steps to configure the GPR13,
+ * set PHY Parameters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
* is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
@@ -698,6 +541,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
}
} else if (imxpriv->type == AHCI_IMX8QM) {
ret = imx8_sata_enable(hpriv);
+ if (ret)
+ goto disable_clk;
+
}
usleep_range(1000, 2000);
@@ -736,8 +582,10 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
break;
case AHCI_IMX8QM:
- clk_disable_unprepare(imxpriv->epcs_rx_clk);
- clk_disable_unprepare(imxpriv->epcs_tx_clk);
+ if (imxpriv->sata_phy) {
+ phy_power_off(imxpriv->sata_phy);
+ phy_exit(imxpriv->sata_phy);
+ }
break;
default:
@@ -760,6 +608,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
ahci_error_handler(ap);
+ if (imxpriv->type == AHCI_IMX8QM)
+ return;
+
if (!(imxpriv->first_time) || ahci_imx_hotplug)
return;
@@ -791,18 +642,19 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
int ret;
if (imxpriv->type == AHCI_IMX53)
- ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+ ret = ahci_pmp_retry_srst_ops.reset.softreset(link, class,
+ deadline);
else
- ret = ahci_ops.softreset(link, class, deadline);
+ ret = ahci_ops.reset.softreset(link, class, deadline);
return ret;
}
static struct ata_port_operations ahci_imx_ops = {
- .inherits = &ahci_ops,
- .host_stop = ahci_imx_host_stop,
- .error_handler = ahci_imx_error_handler,
- .softreset = ahci_imx_softreset,
+ .inherits = &ahci_ops,
+ .host_stop = ahci_imx_host_stop,
+ .error_handler = ahci_imx_error_handler,
+ .reset.softreset = ahci_imx_softreset,
};
static const struct ata_port_info ahci_imx_port_info = {
@@ -986,65 +838,19 @@ static const struct scsi_host_template ahci_platform_sht = {
static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
{
- struct resource *phy_res;
- struct platform_device *pdev = imxpriv->ahci_pdev;
- struct device_node *np = dev->of_node;
-
- if (of_property_read_u32(np, "fsl,phy-imp", &imxpriv->imped_ratio))
- imxpriv->imped_ratio = IMX8QM_SATA_PHY_IMPED_RATIO_85OHM;
- phy_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (phy_res) {
- imxpriv->phy_base = devm_ioremap(dev, phy_res->start,
- resource_size(phy_res));
- if (!imxpriv->phy_base) {
- dev_err(dev, "error with ioremap\n");
- return -ENOMEM;
- }
- } else {
- dev_err(dev, "missing *phy* reg region.\n");
- return -ENOMEM;
- }
- imxpriv->gpr =
- syscon_regmap_lookup_by_phandle(np, "hsio");
- if (IS_ERR(imxpriv->gpr)) {
- dev_err(dev, "unable to find gpr registers\n");
- return PTR_ERR(imxpriv->gpr);
- }
-
- imxpriv->epcs_tx_clk = devm_clk_get(dev, "epcs_tx");
- if (IS_ERR(imxpriv->epcs_tx_clk)) {
- dev_err(dev, "can't get epcs_tx_clk clock.\n");
- return PTR_ERR(imxpriv->epcs_tx_clk);
- }
- imxpriv->epcs_rx_clk = devm_clk_get(dev, "epcs_rx");
- if (IS_ERR(imxpriv->epcs_rx_clk)) {
- dev_err(dev, "can't get epcs_rx_clk clock.\n");
- return PTR_ERR(imxpriv->epcs_rx_clk);
- }
- imxpriv->phy_pclk0 = devm_clk_get(dev, "phy_pclk0");
- if (IS_ERR(imxpriv->phy_pclk0)) {
- dev_err(dev, "can't get phy_pclk0 clock.\n");
- return PTR_ERR(imxpriv->phy_pclk0);
- }
- imxpriv->phy_pclk1 = devm_clk_get(dev, "phy_pclk1");
- if (IS_ERR(imxpriv->phy_pclk1)) {
- dev_err(dev, "can't get phy_pclk1 clock.\n");
- return PTR_ERR(imxpriv->phy_pclk1);
- }
- imxpriv->phy_apbclk = devm_clk_get(dev, "phy_apbclk");
- if (IS_ERR(imxpriv->phy_apbclk)) {
- dev_err(dev, "can't get phy_apbclk clock.\n");
- return PTR_ERR(imxpriv->phy_apbclk);
- }
-
- /* Fetch GPIO, then enable the external OSC */
- imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq",
- GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
- if (IS_ERR(imxpriv->clkreq_gpiod))
- return PTR_ERR(imxpriv->clkreq_gpiod);
- if (imxpriv->clkreq_gpiod)
- gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ");
-
+ imxpriv->sata_phy = devm_phy_get(dev, "sata-phy");
+ if (IS_ERR(imxpriv->sata_phy))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->sata_phy),
+ "Failed to get sata_phy\n");
+
+ imxpriv->cali_phy0 = devm_phy_get(dev, "cali-phy0");
+ if (IS_ERR(imxpriv->cali_phy0))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy0),
+ "Failed to get cali_phy0\n");
+ imxpriv->cali_phy1 = devm_phy_get(dev, "cali-phy1");
+ if (IS_ERR(imxpriv->cali_phy1))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy1),
+ "Failed to get cali_phy1\n");
return 0;
}
@@ -1077,12 +883,6 @@ static int imx_ahci_probe(struct platform_device *pdev)
return PTR_ERR(imxpriv->sata_ref_clk);
}
- imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
- if (IS_ERR(imxpriv->ahb_clk)) {
- dev_err(dev, "can't get ahb clock.\n");
- return PTR_ERR(imxpriv->ahb_clk);
- }
-
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
u32 reg_value;
@@ -1142,11 +942,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
goto disable_clk;
/*
- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
- * and IP vendor specific register IMX_TIMER1MS.
- * Configure CAP_SSS (support stagered spin up).
- * Implement the port0.
- * Get the ahb clock rate, and configure the TIMER1MS register.
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL.
+ * Set CAP_SSS (support staggered spin up) and implement port0.
*/
reg_val = readl(hpriv->mmio + HOST_CAP);
if (!(reg_val & HOST_CAP_SSS)) {
@@ -1159,8 +956,20 @@ static int imx_ahci_probe(struct platform_device *pdev)
writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
}
- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
- writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+ if (imxpriv->type != AHCI_IMX8QM) {
+ /*
+ * Get the AHB clock rate and configure the vendor-specific
+ * TIMER1MS register on i.MX53, i.MX6Q and i.MX6QP only.
+ */
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "Failed to get ahb clock\n");
+ ret = PTR_ERR(imxpriv->ahb_clk);
+ goto disable_sata;
+ }
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+ }
ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
&ahci_platform_sht);
@@ -1219,7 +1028,7 @@ static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_ahci_of_match,
@@ -1229,6 +1038,6 @@ static struct platform_driver imx_ahci_driver = {
module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
-MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_AUTHOR("Richard Zhu <hongxing.zhu@nxp.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
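
With the register-level HSIO setup moved into a generic PHY driver, the AHCI side shrinks to the standard phy consumer sequence: get, init, set mode, power on, and unwind in reverse order on error. A self-contained sketch of that sequence (the device and "sata-phy" lookup name are assumptions for illustration):

    #include <linux/phy/phy.h>

    static int demo_sata_phy_up(struct device *dev)
    {
            struct phy *phy;
            int ret;

            /* "sata-phy" must match an entry in the DT phy-names property. */
            phy = devm_phy_get(dev, "sata-phy");
            if (IS_ERR(phy))
                    return PTR_ERR(phy);

            ret = phy_init(phy);
            if (ret)
                    return ret;

            ret = phy_set_mode(phy, PHY_MODE_SATA);
            if (ret)
                    goto err_exit;

            ret = phy_power_on(phy);        /* undone with phy_power_off() */
            if (ret)
                    goto err_exit;

            return 0;

    err_exit:
            phy_exit(phy);
            return ret;
    }
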
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index adc851cd5578..7295b9066ae2 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -174,7 +174,7 @@ MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver mtk_ahci_driver = {
.probe = mtk_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f3187351e8a6..8744dae41612 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.suspend = ahci_mvebu_suspend,
.resume = ahci_mvebu_resume,
.driver = {
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 81fc63f6b008..c18054333f7c 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -96,7 +96,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b1a4e57578e2..0dec1a17e5b1 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -146,8 +146,8 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
}
static struct ata_port_operations ahci_qoriq_ops = {
- .inherits = &ahci_ops,
- .hardreset = ahci_qoriq_hardreset,
+ .inherits = &ahci_ops,
+ .reset.hardreset = ahci_qoriq_hardreset,
};
static const struct ata_port_info ahci_qoriq_port_info = {
@@ -357,7 +357,7 @@ static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
static struct platform_driver ahci_qoriq_driver = {
.probe = ahci_qoriq_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_qoriq_of_match,
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 59f97aa7ac75..3f16c1678402 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_seattle_driver = {
.probe = ahci_seattle_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.acpi_match_table = ahci_acpi_match,
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 79a8b0aa37bf..4336c8a6e208 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -176,7 +176,6 @@ static int st_ahci_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -221,9 +220,8 @@ static int st_ahci_resume(struct device *dev)
return ahci_platform_resume_host(dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
@@ -234,11 +232,11 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &st_ahci_pm_ops,
+ .pm = pm_sleep_ptr(&st_ahci_pm_ops),
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(st_ahci_driver);
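
The ahci_st.c hunk replaces SIMPLE_DEV_PM_OPS under #ifdef CONFIG_PM_SLEEP with DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(): the callbacks stay visible to the compiler, avoiding unused-function warnings, and are discarded as dead code when sleep support is off. A sketch with a hypothetical "demo" platform driver:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int demo_suspend(struct device *dev)
    {
            return 0;       /* quiesce the hardware here */
    }

    static int demo_resume(struct device *dev)
    {
            return 0;       /* re-initialize the hardware here */
    }

    /* No #ifdef CONFIG_PM_SLEEP needed around the callbacks. */
    static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

    static struct platform_driver demo_driver = {
            .driver = {
                    .name = "demo",
                    /* Compiles to NULL when CONFIG_PM_SLEEP is disabled. */
                    .pm = pm_sleep_ptr(&demo_pm_ops),
            },
    };
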
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 58b2683954dd..5d4584570ae0 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
static struct platform_driver ahci_sunxi_driver = {
.probe = ahci_sunxi_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_sunxi_of_match,
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 8703c2a4658b..44584eed6374 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -608,7 +608,7 @@ deinit_controller:
static struct platform_driver tegra_ahci_driver = {
.probe = tegra_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra_ahci_of_match,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 81a1d838c0fc..6b8844646fcd 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
- u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
- rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
-
- return rc;
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
- u32 rc;
+ int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
@@ -534,7 +531,7 @@ softreset_retry:
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
- * @host: Host that recieved the irq
+ * @host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
* For hardware with broken edge trigger latch
@@ -613,11 +610,11 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
+ .reset.softreset = xgene_ahci_softreset,
+ .pmp_reset.softreset = xgene_ahci_pmp_softreset,
.read_id = xgene_ahci_read_id,
.qc_issue = xgene_ahci_qc_issue,
- .softreset = xgene_ahci_softreset,
- .pmp_softreset = xgene_ahci_pmp_softreset
};
static const struct ata_port_info xgene_ahci_v1_port_info = {
@@ -630,7 +627,7 @@ static const struct ata_port_info xgene_ahci_v1_port_info = {
static struct ata_port_operations xgene_ahci_v2_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
};
@@ -859,7 +856,7 @@ disable_resources:
static struct platform_driver xgene_ahci_driver = {
.probe = xgene_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 2f57ec00ab82..e70b6c089cf1 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -209,7 +209,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
-static struct pci_device_id ata_generic[] = {
+static const struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ec3c5bd1f813..495fa096dd65 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1074,7 +1074,7 @@ static struct ata_port_operations piix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
+ .reset.prereset = piix_pata_prereset,
};
static struct ata_port_operations piix_vmw_ops = {
@@ -1089,6 +1089,7 @@ static struct ata_port_operations ich_pata_ops = {
};
static struct attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
NULL
};
@@ -1102,7 +1103,7 @@ static const struct scsi_host_template piix_sidpr_sht = {
static struct ata_port_operations piix_sidpr_sata_ops = {
.inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = piix_sidpr_scr_read,
.scr_write = piix_sidpr_scr_write,
.set_lpm = piix_sidpr_set_lpm,
@@ -1446,7 +1447,6 @@ static int piix_init_sidpr(struct ata_host *host)
if (hpriv->map[i] == IDE)
return 0;
- /* is it blacklisted? */
if (piix_no_sidpr(host))
return 0;
@@ -1726,7 +1726,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* message-signalled interrupts currently).
*/
if (port_flags & PIIX_FLAG_CHECKINTR)
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
/* This writes into the master table but it does not
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 83431aae74d8..c79abdfcd7a9 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -111,6 +111,7 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
static struct attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
@@ -162,10 +163,10 @@ struct ata_port_operations ahci_ops = {
.freeze = ahci_freeze,
.thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
+ .reset.softreset = ahci_softreset,
+ .reset.hardreset = ahci_hardreset,
+ .reset.postreset = ahci_postreset,
+ .pmp_reset.softreset = ahci_softreset,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.dev_config = ahci_dev_config,
@@ -192,7 +193,7 @@ EXPORT_SYMBOL_GPL(ahci_ops);
struct ata_port_operations ahci_pmp_retry_srst_ops = {
.inherits = &ahci_ops,
- .softreset = ahci_pmp_retry_softreset,
+ .reset.softreset = ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
@@ -541,6 +542,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
+ /* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
@@ -1033,7 +1035,7 @@ static void ahci_sw_activity(struct ata_link *link)
static void ahci_sw_activity_blink(struct timer_list *t)
{
- struct ahci_em_priv *emp = from_timer(emp, t, timer);
+ struct ahci_em_priv *emp = timer_container_of(emp, t, timer);
struct ata_link *link = emp->link;
struct ata_port *ap = link->ap;
@@ -1321,6 +1323,10 @@ static void ahci_dev_config(struct ata_device *dev)
{
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK))
+ dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA;
+
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
ata_dev_info(dev,
@@ -2075,13 +2081,6 @@ static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
- /*
- * rtf may already be filled (e.g. for successful NCQ commands).
- * If that is the case, we have nothing to do.
- */
- if (qc->flags & ATA_QCFLAG_RTF_FILLED)
- return;
-
if (pp->fbs_enabled)
rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
@@ -2095,7 +2094,6 @@ static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
!(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
- qc->flags |= ATA_QCFLAG_RTF_FILLED;
return;
}
@@ -2118,12 +2116,10 @@ static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
*/
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
- qc->flags |= ATA_QCFLAG_RTF_FILLED;
return;
}
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
@@ -2158,6 +2154,7 @@ static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
if (qc && ata_is_ncq(qc->tf.protocol)) {
qc->result_tf.status = status;
qc->result_tf.error = error;
+ qc->result_tf.flags = qc->tf.flags;
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
done_mask &= ~(1ULL << tag);
@@ -2182,6 +2179,7 @@ static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
fis += RX_FIS_SDB;
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
+ qc->result_tf.flags = qc->tf.flags;
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
done_mask &= ~(1ULL << tag);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 581704e61f28..91d44302eac9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -49,6 +49,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -70,6 +73,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
disable_phys:
while (--i >= 0) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -88,6 +94,9 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
int i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -410,7 +419,6 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
struct device *dev)
{
- struct device_node *child;
u32 port;
if (!of_property_read_u32(dev->of_node, "hba-cap", &hpriv->saved_cap))
@@ -419,14 +427,12 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
of_property_read_u32(dev->of_node,
"ports-implemented", &hpriv->saved_port_map);
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
if (!of_device_is_available(child))
continue;
- if (of_property_read_u32(child, "reg", &port)) {
- of_node_put(child);
+ if (of_property_read_u32(child, "reg", &port))
return -EINVAL;
- }
if (!of_property_read_u32(child, "hba-port-cap", &hpriv->saved_port_cap[port]))
hpriv->saved_port_cap[port] &= PORT_CMD_CAP;
@@ -435,6 +441,20 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
return 0;
}
+static u32 ahci_platform_find_max_port_id(struct device *dev)
+{
+ u32 max_port = 0;
+
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ u32 port;
+
+ if (!of_property_read_u32(child, "reg", &port))
+ max_port = max(max_port, port);
+ }
+
+ return max_port;
+}
+
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
@@ -460,8 +480,8 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
int child_nodes, rc = -ENOMEM, enabled_ports = 0;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
- struct device_node *child;
u32 mask_port_map = 0;
+ u32 max_port;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
@@ -553,15 +573,17 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
goto err_out;
}
+ /* find maximum port id for allocating structures */
+ max_port = ahci_platform_find_max_port_id(dev);
/*
- * If no sub-node was found, we still need to set nports to
- * one in order to be able to use the
+ * Set nports according to the maximum port id. Clamp at
+ * AHCI_MAX_PORTS; the warning for an invalid port id
+ * is generated later.
+ * When the DT has no sub-nodes, max_port is 0 and nports is 1,
+ * in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (child_nodes)
- hpriv->nports = child_nodes;
- else
- hpriv->nports = 1;
+ hpriv->nports = min(AHCI_MAX_PORTS, max_port + 1);
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
@@ -579,7 +601,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
}
if (child_nodes) {
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
u32 port;
struct platform_device *port_dev __maybe_unused;
@@ -588,7 +610,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL;
- of_node_put(child);
goto err_out;
}
@@ -606,18 +627,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (port_dev) {
rc = ahci_platform_get_regulator(hpriv, port,
&port_dev->dev);
- if (rc == -EPROBE_DEFER) {
- of_node_put(child);
+ if (rc == -EPROBE_DEFER)
goto err_out;
- }
}
#endif
rc = ahci_platform_get_phy(hpriv, port, dev, child);
- if (rc) {
- of_node_put(child);
+ if (rc)
goto err_out;
- }
enabled_ports++;
}
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index d36e71f475ab..15e18d50dcc6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -86,7 +86,7 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
* @dev: ATA device ACPI event occurred (can be NULL)
* @event: ACPI event which occurred
*
- * All ACPI bay / device realted events end up in this function. If
+ * All ACPI bay / device related events end up in this function. If
* the event is port-wide @dev is NULL. If the event is specific to a
* device, @dev points to it.
*
@@ -246,6 +246,73 @@ void ata_acpi_bind_dev(struct ata_device *dev)
}
/**
+ * ata_acpi_dev_manage_restart - check whether the disk should be stopped
+ * (spun down) on system restart.
+ * @dev: target ATA device
+ *
+ * RETURNS:
+ * true if the disk should be stopped, otherwise false.
+ */
+bool ata_acpi_dev_manage_restart(struct ata_device *dev)
+{
+ struct device *tdev;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA)
+ tdev = &dev->tdev;
+ else
+ tdev = &dev->link->ap->tdev;
+
+ if (!is_acpi_device_node(tdev->fwnode))
+ return false;
+ return acpi_bus_power_manageable(ACPI_HANDLE(tdev));
+}
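A hedged caller sketch: probe code can consult this predicate to decide whether
a restart should spin the drive down; the debug message below is purely
illustrative, not the actual libata-scsi plumbing.

static void example_probe_step(struct ata_device *dev)
{
	/* Propagating this decision to the upper layer's policy is implied. */
	if (ata_acpi_dev_manage_restart(dev))
		ata_dev_dbg(dev, "disk will be spun down on restart\n");
}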
+
+/**
+ * ata_acpi_port_power_on - set the power state of the ata port to D0
+ * @ap: target ATA port
+ *
+ * This function is called at the beginning of ata_port_probe().
+ */
+void ata_acpi_port_power_on(struct ata_port *ap)
+{
+ acpi_handle handle;
+ int i;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &ap->link.device[i];
+
+ if (!is_acpi_device_node(dev->tdev.fwnode))
+ continue;
+ handle = ACPI_HANDLE(&dev->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ continue;
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_dev_err(dev,
+ "acpi: failed to set power state to D0\n");
+ }
+ return;
+ }
+
+ if (!is_acpi_device_node(ap->tdev.fwnode))
+ return;
+ handle = ACPI_HANDLE(&ap->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ return;
+
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_port_err(ap, "acpi: failed to set power state to D0\n");
+}
+
+/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
*
@@ -514,15 +581,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
/**
- * ata_acpi_cbl_80wire - Check for 80 wire cable
+ * ata_acpi_cbl_pata_type - Return PATA cable type
* @ap: Port to check
- * @gtm: GTM data to use
*
- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+ * Return ATA_CBL_PATA* according to the transfer mode selected by the BIOS.
*/
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
struct ata_device *dev;
+ int ret = ATA_CBL_PATA_UNK;
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+ if (!gtm)
+ return ATA_CBL_PATA40;
ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask;
@@ -530,13 +601,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
- if (udma_mask & ~ATA_UDMA_MASK_40C)
- return 1;
+ ret = ATA_CBL_PATA40;
+
+ if (udma_mask & ~ATA_UDMA_MASK_40C) {
+ ret = ATA_CBL_PATA80;
+ break;
+ }
}
- return 0;
+ return ret;
}
-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
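Usage sketch for the new export, assuming a PATA host driver's cable_detect()
method: the tri-state return value replaces the old boolean-plus-default
pattern built around ata_acpi_cbl_80wire().

/* Sketch: a host driver returns the ACPI-derived cable type directly. */
static int example_cable_detect(struct ata_port *ap)
{
	return ata_acpi_cbl_pata_type(ap);
}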
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
@@ -832,7 +907,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
} else {
- /* SATA _GTF needs to be evaulated after _SDD and
+ /* SATA _GTF needs to be evaluated after _SDD and
* there's no reason to evaluate IDE _GTF early
* without _STM. Clear cache and schedule _GTF.
*/
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4f35aab81a0a..0b24bd169d61 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -50,7 +50,7 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
@@ -65,28 +65,20 @@
#include "libata-transport.h"
const struct ata_port_operations ata_base_port_ops = {
- .prereset = ata_std_prereset,
- .postreset = ata_std_postreset,
+ .reset.prereset = ata_std_prereset,
+ .reset.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
};
-const struct ata_port_operations sata_port_ops = {
- .inherits = &ata_base_port_ops,
-
- .qc_defer = ata_std_qc_defer,
- .hardreset = sata_std_hardreset,
-};
-EXPORT_SYMBOL_GPL(sata_port_ops);
-
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
-static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+static unsigned int ata_dev_quirks(const struct ata_device *dev);
-atomic_t ata_print_id = ATOMIC_INIT(0);
+static DEFINE_IDA(ata_ida);
#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
@@ -94,8 +86,9 @@ struct ata_force_param {
u8 cbl;
u8 spd_limit;
unsigned int xfer_mask;
- unsigned int horkage_on;
- unsigned int horkage_off;
+ unsigned int quirk_on;
+ unsigned int quirk_off;
+ unsigned int pflags_on;
u16 lflags_on;
u16 lflags_off;
};
@@ -160,18 +153,13 @@ MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static inline bool ata_dev_print_info(struct ata_device *dev)
+static inline bool ata_dev_print_info(const struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
return ehc->i.flags & ATA_EHI_PRINTINFO;
}
-static bool ata_sstatus_online(u32 sstatus)
-{
- return (sstatus & 0xf) == 0x3;
-}
-
/**
* ata_link_next - link iteration helper
* @link: the previous link, NULL to start
@@ -345,6 +333,35 @@ void ata_force_cbl(struct ata_port *ap)
}
/**
+ * ata_force_pflags - force port flags according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force port flags according to libata.force and whine about it.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_pflags(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ /* let pflags stack */
+ if (fe->param.pflags_on) {
+ ap->pflags |= fe->param.pflags_on;
+ ata_port_notice(ap,
+ "FORCE: port flag 0x%x forced -> 0x%x\n",
+ fe->param.pflags_on, ap->pflags);
+ }
+ }
+}
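For illustration only (the keyword below is hypothetical, not a documented
libata.force value), a matching entry stacks its bits into ap->pflags:

/*
 * Hypothetical command line:  libata.force=1:example_pflag
 * If the force table mapped "example_pflag" to a pflags bit, the loop
 * above would OR that bit into ap->pflags for port 1 and log the result.
 */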
+
+/**
* ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
*
@@ -457,17 +474,17 @@ static void ata_force_xfermask(struct ata_device *dev)
}
/**
- * ata_force_horkage - force horkage according to libata.force
+ * ata_force_quirks - force quirks according to libata.force
* @dev: ATA device of interest
*
- * Force horkage according to libata.force and whine about it.
+ * Force quirks according to libata.force and whine about it.
* For consistency with link selection, device number 15 selects
* the first device connected to the host link.
*
* LOCKING:
* EH context.
*/
-static void ata_force_horkage(struct ata_device *dev)
+static void ata_force_quirks(struct ata_device *dev)
{
int devno = dev->link->pmp + dev->devno;
int alt_devno = devno;
@@ -487,21 +504,22 @@ static void ata_force_horkage(struct ata_device *dev)
fe->device != alt_devno)
continue;
- if (!(~dev->horkage & fe->param.horkage_on) &&
- !(dev->horkage & fe->param.horkage_off))
+ if (!(~dev->quirks & fe->param.quirk_on) &&
+ !(dev->quirks & fe->param.quirk_off))
continue;
- dev->horkage |= fe->param.horkage_on;
- dev->horkage &= ~fe->param.horkage_off;
+ dev->quirks |= fe->param.quirk_on;
+ dev->quirks &= ~fe->param.quirk_off;
- ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
+ ata_dev_notice(dev, "FORCE: modified (%s)\n",
fe->param.name);
}
}
#else
+static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
-static inline void ata_force_horkage(struct ata_device *dev) { }
+static inline void ata_force_quirks(struct ata_device *dev) { }
#endif
/**
@@ -1221,7 +1239,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
*max_sectors = ata_tf_to_lba48(&tf) + 1;
else
*max_sectors = ata_tf_to_lba(&tf) + 1;
- if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
+ if (dev->quirks & ATA_QUIRK_HPA_SIZE)
(*max_sectors)--;
return 0;
}
@@ -1306,7 +1324,7 @@ static int ata_hpa_resize(struct ata_device *dev)
/* do we need to do it? */
if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
!ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
- (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
+ (dev->quirks & ATA_QUIRK_BROKEN_HPA))
return 0;
/* read native max address */
@@ -1318,7 +1336,7 @@ static int ata_hpa_resize(struct ata_device *dev)
if (rc == -EACCES || !unlock_hpa) {
ata_dev_warn(dev,
"HPA support seems broken, skipping HPA handling\n");
- dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ dev->quirks |= ATA_QUIRK_BROKEN_HPA;
/* we can continue if device aborted the command */
if (rc == -EACCES)
@@ -1355,7 +1373,7 @@ static int ata_hpa_resize(struct ata_device *dev)
"device aborted resize (%llu -> %llu), skipping HPA handling\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
- dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ dev->quirks |= ATA_QUIRK_BROKEN_HPA;
return 0;
} else if (rc)
return rc;
@@ -1835,7 +1853,7 @@ retry:
goto err_out;
}
- if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+ if (dev->quirks & ATA_QUIRK_DUMP_ID) {
ata_dev_info(dev, "dumping IDENTIFY data, "
"class=%d may_fallback=%d tried_spinup=%d\n",
class, may_fallback, tried_spinup);
@@ -2104,7 +2122,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
+ !(dev->quirks & ATA_QUIRK_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
@@ -2124,7 +2142,7 @@ retry:
if (err_mask) {
if (dma) {
- dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ dev->quirks |= ATA_QUIRK_NO_DMA_LOG;
if (!ata_port_is_frozen(dev->link->ap))
goto retry;
}
@@ -2136,24 +2154,50 @@ retry:
return err_mask;
}
-static int ata_log_supported(struct ata_device *dev, u8 log)
+static inline void ata_clear_log_directory(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
+ memset(dev->gp_log_dir, 0, ATA_SECT_SIZE);
+}
+
+static int ata_read_log_directory(struct ata_device *dev)
+{
+ u16 version;
+
+ /* If the log page is already cached, do nothing. */
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version == 0x0001)
+ return 0;
- if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->gp_log_dir, 1)) {
+ ata_clear_log_directory(dev);
+ return -EIO;
+ }
+
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version != 0x0001)
+ ata_dev_warn_once(dev,
+ "Invalid log directory version 0x%04x\n",
+ version);
+
+ return 0;
+}
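Sketch of the caching contract introduced above (an assumption drawn from this
patch, not a separate kernel API): a cached copy is recognized purely by the
mandatory version word 0x0001, so zeroing the buffer invalidates it.

static bool example_log_dir_cached(const struct ata_device *dev)
{
	/* Offset 0 of the log directory holds its version word. */
	return get_unaligned_le16(&dev->gp_log_dir[0]) == 0x0001;
}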
+
+static int ata_log_supported(struct ata_device *dev, u8 log)
+{
+ if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
return 0;
- if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+ if (ata_read_log_directory(dev))
return 0;
- return get_unaligned_le16(&ap->sector_buf[log * 2]);
+
+ return get_unaligned_le16(&dev->gp_log_dir[log * 2]);
}
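Usage sketch: a non-zero return is the page count of the queried log, so
callers typically treat it as a boolean, e.g.:

static bool example_has_ncq_send_recv(struct ata_device *dev)
{
	return ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV) > 0;
}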
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err, i;
- if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
+ if (dev->quirks & ATA_QUIRK_NO_ID_DEV_LOG)
return false;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
@@ -2165,7 +2209,7 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
if (ata_id_major_version(dev->id) >= 10)
ata_dev_warn(dev,
"ATA Identify Device Log not supported\n");
- dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
+ dev->quirks |= ATA_QUIRK_NO_ID_DEV_LOG;
return false;
}
@@ -2173,20 +2217,20 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
* Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
* supported.
*/
- err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
- 1);
+ err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0,
+ dev->sector_buf, 1);
if (err)
return false;
- for (i = 0; i < ap->sector_buf[8]; i++) {
- if (ap->sector_buf[9 + i] == page)
+ for (i = 0; i < dev->sector_buf[8]; i++) {
+ if (dev->sector_buf[9 + i] == page)
return true;
}
return false;
}
-static int ata_do_link_spd_horkage(struct ata_device *dev)
+static int ata_do_link_spd_quirk(struct ata_device *dev)
{
struct ata_link *plink = ata_dev_phys_link(dev);
u32 target, target_limit;
@@ -2194,7 +2238,7 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
if (!sata_scr_valid(plink))
return 0;
- if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
+ if (dev->quirks & ATA_QUIRK_1_5_GBPS)
target = 1;
else
return 0;
@@ -2212,26 +2256,25 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
* guaranteed by setting sata_spd_limit to target_limit above.
*/
if (plink->sata_spd > target) {
- ata_dev_info(dev, "applying link speed limit horkage to %s\n",
+ ata_dev_info(dev, "applying link speed limit quirk to %s\n",
sata_spd_string(target));
return -EAGAIN;
}
return 0;
}
-static inline u8 ata_dev_knobble(struct ata_device *dev)
+static inline bool ata_dev_knobble(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
- if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
- return 0;
+ if (ata_dev_quirks(dev) & ATA_QUIRK_BRIDGE_OK)
+ return false;
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
@@ -2239,14 +2282,14 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
- 0, ap->sector_buf, 1);
+ 0, dev->sector_buf, 1);
if (!err_mask) {
u8 *cmds = dev->ncq_send_recv_cmds;
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, dev->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ if (dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) {
ata_dev_dbg(dev, "disabling queued TRIM support\n");
cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
@@ -2256,26 +2299,22 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
ata_dev_warn(dev,
- "NCQ Send/Recv Log not supported\n");
+ "NCQ Non-Data Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
- 0, ap->sector_buf, 1);
- if (!err_mask) {
- u8 *cmds = dev->ncq_non_data_cmds;
-
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
- }
+ 0, dev->sector_buf, 1);
+ if (!err_mask)
+ memcpy(dev->ncq_non_data_cmds, dev->sector_buf,
+ ATA_LOG_NCQ_NON_DATA_SIZE);
}
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
@@ -2284,12 +2323,11 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
- ap->sector_buf,
- 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
- if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
+ if (!(dev->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
goto not_supported;
dev->flags |= ATA_DFLAG_NCQ_PRIO;
@@ -2334,12 +2372,12 @@ static int ata_dev_config_ncq(struct ata_device *dev,
}
if (!IS_ENABLED(CONFIG_SATA_HOST))
return 0;
- if (dev->horkage & ATA_HORKAGE_NONCQ) {
+ if (dev->quirks & ATA_QUIRK_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
}
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
+ if (dev->quirks & ATA_QUIRK_NO_NCQ_ON_ATI &&
ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
@@ -2350,7 +2388,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
dev->flags |= ATA_DFLAG_NCQ;
}
- if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+ if (!(dev->quirks & ATA_QUIRK_BROKEN_FPDMA_AA) &&
(ap->flags & ATA_FLAG_FPDMA_AA) &&
ata_id_has_fpdma_aa(dev->id)) {
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
@@ -2360,7 +2398,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to enable AA (error_mask=0x%x)\n",
err_mask);
if (err_mask != AC_ERR_DEV) {
- dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+ dev->quirks |= ATA_QUIRK_BROKEN_FPDMA_AA;
return -EIO;
}
} else
@@ -2405,26 +2443,14 @@ static void ata_dev_config_sense_reporting(struct ata_device *dev)
static void ata_dev_config_zac(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
- u8 *identify_buf = ap->sector_buf;
+ u8 *identify_buf = dev->sector_buf;
dev->zac_zones_optimal_open = U32_MAX;
dev->zac_zones_optimal_nonseq = U32_MAX;
dev->zac_zones_max_open = U32_MAX;
- /*
- * Always set the 'ZAC' flag for Host-managed devices.
- */
- if (dev->class == ATA_DEV_ZAC)
- dev->flags |= ATA_DFLAG_ZAC;
- else if (ata_id_zoned_cap(dev->id) == 0x01)
- /*
- * Check for host-aware devices.
- */
- dev->flags |= ATA_DFLAG_ZAC;
-
- if (!(dev->flags & ATA_DFLAG_ZAC))
+ if (!ata_dev_is_zac(dev))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
@@ -2459,7 +2485,6 @@ static void ata_dev_config_zac(struct ata_device *dev)
static void ata_dev_config_trusted(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
u64 trusted_cap;
unsigned int err;
@@ -2473,11 +2498,11 @@ static void ata_dev_config_trusted(struct ata_device *dev)
}
err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err)
return;
- trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
+ trusted_cap = get_unaligned_le64(&dev->sector_buf[40]);
if (!(trusted_cap & (1ULL << 63))) {
ata_dev_dbg(dev,
"Trusted Computing capability qword not valid!\n");
@@ -2488,12 +2513,41 @@ static void ata_dev_config_trusted(struct ata_device *dev)
dev->flags |= ATA_DFLAG_TRUSTED;
}
+static void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
+{
+ kfree(dev->cdl);
+ dev->cdl = NULL;
+}
+
+static int ata_dev_init_cdl_resources(struct ata_device *dev)
+{
+ struct ata_cdl *cdl = dev->cdl;
+ unsigned int err_mask;
+
+ if (!cdl) {
+ cdl = kzalloc(sizeof(*cdl), GFP_KERNEL);
+ if (!cdl)
+ return -ENOMEM;
+ dev->cdl = cdl;
+ }
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, cdl->desc_log_buf,
+ ATA_LOG_CDL_SIZE / ATA_SECT_SIZE);
+ if (err_mask) {
+ ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
+ ata_dev_cleanup_cdl_resources(dev);
+ return -EIO;
+ }
+
+ return 0;
+}
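Lifetime sketch for dev->cdl as implied by the two helpers above: the structure
is allocated lazily on the first successful CDL probe, reused across
revalidation, and released on a log-read failure or at device teardown. The
call site below is illustrative only.

static void example_dev_teardown(struct ata_device *dev)
{
	/* Idempotent: kfree(NULL) is a no-op and the pointer is cleared. */
	ata_dev_cleanup_cdl_resources(dev);
}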
+
static void ata_dev_config_cdl(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
bool cdl_enabled;
u64 val;
+ int ret;
if (ata_id_major_version(dev->id) < 11)
goto not_supported;
@@ -2505,12 +2559,12 @@ static void ata_dev_config_cdl(struct ata_device *dev)
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SUPPORTED_CAPABILITIES,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
/* Check Command Duration Limit Supported bits */
- val = get_unaligned_le64(&ap->sector_buf[168]);
+ val = get_unaligned_le64(&dev->sector_buf[168]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
goto not_supported;
@@ -2523,7 +2577,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
* We must have support for the sense data for successful NCQ commands
* log indicated by the successful NCQ command sense data supported bit.
*/
- val = get_unaligned_le64(&ap->sector_buf[8]);
+ val = get_unaligned_le64(&dev->sector_buf[8]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
ata_dev_warn(dev,
"CDL supported but Successful NCQ Command Sense Data is not supported\n");
@@ -2543,11 +2597,11 @@ static void ata_dev_config_cdl(struct ata_device *dev)
*/
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_CURRENT_SETTINGS,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
- val = get_unaligned_le64(&ap->sector_buf[8]);
+ val = get_unaligned_le64(&dev->sector_buf[8]);
cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
if (!cdl_enabled) {
@@ -2588,37 +2642,20 @@ static void ata_dev_config_cdl(struct ata_device *dev)
}
}
- /*
- * Allocate a buffer to handle reading the sense data for successful
- * NCQ Commands log page for commands using a CDL with one of the limit
- * policy set to 0xD (successful completion with sense data available
- * bit set).
- */
- if (!ap->ncq_sense_buf) {
- ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
- if (!ap->ncq_sense_buf)
- goto not_supported;
- }
-
- /*
- * Command duration limits is supported: cache the CDL log page 18h
- * (command duration descriptors).
- */
- err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
+ /* CDL is supported: allocate and initialize needed resources. */
+ ret = ata_dev_init_cdl_resources(dev);
+ if (ret) {
+ ata_dev_warn(dev, "Initialize CDL resources failed\n");
goto not_supported;
}
- memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
dev->flags |= ATA_DFLAG_CDL;
return;
not_supported:
dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
- kfree(ap->ncq_sense_buf);
- ap->ncq_sense_buf = NULL;
+ ata_dev_cleanup_cdl_resources(dev);
}
static int ata_dev_config_lba(struct ata_device *dev)
@@ -2689,7 +2726,7 @@ static void ata_dev_config_fua(struct ata_device *dev)
goto nofua;
/* Ignore known bad devices and devices that lack NCQ support */
- if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
+ if (!ata_ncq_supported(dev) || (dev->quirks & ATA_QUIRK_NO_FUA))
goto nofua;
dev->flags |= ATA_DFLAG_FUA;
@@ -2702,7 +2739,7 @@ nofua:
static void ata_dev_config_devslp(struct ata_device *dev)
{
- u8 *sata_setting = dev->link->ap->sector_buf;
+ u8 *sata_setting = dev->sector_buf;
unsigned int err_mask;
int i, j;
@@ -2782,17 +2819,70 @@ out:
kfree(buf);
}
+/*
+ * Configure features related to link power management.
+ */
+static void ata_dev_config_lpm(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (ap->flags & ATA_FLAG_NO_LPM) {
+ /*
+ * When the port does not support LPM, we cannot support it on
+ * the device either.
+ */
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ } else {
+ /*
+ * Some WD SATA-1 drives have issues with LPM, turn on NOLPM for
+ * them.
+ */
+ if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
+ (dev->id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
+ /* ATI specific quirk */
+ if ((dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI) &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ }
+
+ if (dev->quirks & ATA_QUIRK_NOLPM &&
+ ap->target_lpm_policy != ATA_LPM_MAX_POWER) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
+ /*
+ * Device Initiated Power Management (DIPM) is normally disabled by
+ * default on a device. However, DIPM may have been enabled and that
+ * setting kept even after COMRESET because of the Software Settings
+ * Preservation feature. So if the port does not support DIPM and the
+ * device does, disable DIPM on the device.
+ */
+ if (ap->flags & ATA_FLAG_NO_DIPM && ata_id_has_dipm(dev->id)) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV)
+ ata_dev_err(dev, "Disable DIPM failed, Emask 0x%x\n",
+ err_mask);
+ }
+}
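Condensed predicate equivalent to the quirk logic above (a sketch, not a
separate libata helper): LPM ends up blocked whenever the port forbids it or
any of the device quirks resolves to ATA_QUIRK_NOLPM.

static bool example_lpm_blocked(const struct ata_device *dev)
{
	const struct ata_port *ap = dev->link->ap;

	return (ap->flags & ATA_FLAG_NO_LPM) ||
	       (dev->quirks & ATA_QUIRK_NOLPM);
}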
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
+ ata_id_has_hipm(dev->id) ? " HIPM" : "",
+ ata_id_has_dipm(dev->id) ? " DIPM" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
@@ -2829,11 +2919,14 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
- /* set horkage */
- dev->horkage |= ata_dev_blacklisted(dev);
- ata_force_horkage(dev);
+ /* Clear the general purpose log directory cache. */
+ ata_clear_log_directory(dev);
- if (dev->horkage & ATA_HORKAGE_DISABLE) {
+ /* Set quirks */
+ dev->quirks |= ata_dev_quirks(dev);
+ ata_force_quirks(dev);
+
+ if (dev->quirks & ATA_QUIRK_DISABLE) {
ata_dev_info(dev, "unsupported device, disabling\n");
ata_dev_disable(dev);
return 0;
@@ -2848,23 +2941,10 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
- rc = ata_do_link_spd_horkage(dev);
+ rc = ata_do_link_spd_quirk(dev);
if (rc)
return rc;
- /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
- if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
- (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
- dev->horkage |= ATA_HORKAGE_NOLPM;
-
- if (ap->flags & ATA_FLAG_NO_LPM)
- dev->horkage |= ATA_HORKAGE_NOLPM;
-
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
- ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
- dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
- }
-
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -2926,6 +3006,16 @@ int ata_dev_configure(struct ata_device *dev)
}
dev->n_sectors = ata_id_n_sectors(id);
+ if (ata_id_is_locked(id)) {
+ /*
+ * If Security locked, set capacity to zero to prevent
+ * any I/O, e.g. partition scanning, as any I/O to a
+ * locked drive will result in user visible errors.
+ */
+ ata_dev_info(dev,
+ "Security locked, setting capacity to zero\n");
+ dev->n_sectors = 0;
+ }
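The ata_id_is_locked() predicate used above is assumed to test the Security
status word of the IDENTIFY data; a sketch under that assumption (word 128,
enabled bit 1, locked bit 2) would be:

static inline bool example_id_is_locked(const u16 *id)
{
	/* Assumed layout: word 128 bit 1 = enabled, bit 2 = locked. */
	return (id[128] & 0x0006) == 0x0006;
}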
/* get current R/W Multiple count setting */
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
@@ -2951,6 +3041,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_chs(dev);
}
+ ata_dev_config_lpm(dev);
ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
@@ -3006,7 +3097,8 @@ int ata_dev_configure(struct ata_device *dev)
cdb_intr_string = ", CDB intr";
}
- if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
+ if (atapi_dmadir || (dev->quirks & ATA_QUIRK_ATAPI_DMADIR) ||
+ atapi_id_dmadir(dev->id)) {
dev->flags |= ATA_DFLAG_DMADIR;
dma_dir_string = ", DMADIR";
}
@@ -3043,24 +3135,28 @@ int ata_dev_configure(struct ata_device *dev)
if ((dev->class == ATA_DEV_ATAPI) &&
(atapi_command_packet_set(id) == TYPE_TAPE)) {
dev->max_sectors = ATA_MAX_SECTORS_TAPE;
- dev->horkage |= ATA_HORKAGE_STUCK_ERR;
+ dev->quirks |= ATA_QUIRK_STUCK_ERR;
}
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_128)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_1024)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_8191)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_8191,
+ dev->max_sectors);
+
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
- if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+ if (dev->quirks & ATA_QUIRK_DIAGNOSTIC) {
/* Let the user know. We don't want to disallow opens for
rescue purposes, or in case the vendor is just a blithering
idiot. Do this after the dev_config call as some controllers
@@ -3075,7 +3171,7 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
+ if ((dev->quirks & ATA_QUIRK_FIRMWARE_WARN) && print_info) {
ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
}
@@ -3199,86 +3295,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
}
EXPORT_SYMBOL_GPL(ata_dev_pair);
-/**
- * sata_down_spd_limit - adjust SATA spd limit downward
- * @link: Link to adjust SATA spd limit for
- * @spd_limit: Additional limit
- *
- * Adjust SATA spd limit of @link downward. Note that this
- * function only adjusts the limit. The change must be applied
- * using sata_set_spd().
- *
- * If @spd_limit is non-zero, the speed is limited to equal to or
- * lower than @spd_limit if such speed is supported. If
- * @spd_limit is slower than any supported speed, only the lowest
- * supported speed is allowed.
- *
- * LOCKING:
- * Inherited from caller.
- *
- * RETURNS:
- * 0 on success, negative errno on failure
- */
-int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
-{
- u32 sstatus, spd, mask;
- int rc, bit;
-
- if (!sata_scr_valid(link))
- return -EOPNOTSUPP;
-
- /* If SCR can be read, use it to determine the current SPD.
- * If not, use cached value in link->sata_spd.
- */
- rc = sata_scr_read(link, SCR_STATUS, &sstatus);
- if (rc == 0 && ata_sstatus_online(sstatus))
- spd = (sstatus >> 4) & 0xf;
- else
- spd = link->sata_spd;
-
- mask = link->sata_spd_limit;
- if (mask <= 1)
- return -EINVAL;
-
- /* unconditionally mask off the highest bit */
- bit = fls(mask) - 1;
- mask &= ~(1 << bit);
-
- /*
- * Mask off all speeds higher than or equal to the current one. At
- * this point, if current SPD is not available and we previously
- * recorded the link speed from SStatus, the driver has already
- * masked off the highest bit so mask should already be 1 or 0.
- * Otherwise, we should not force 1.5Gbps on a link where we have
- * not previously recorded speed from SStatus. Just return in this
- * case.
- */
- if (spd > 1)
- mask &= (1 << (spd - 1)) - 1;
- else if (link->sata_spd)
- return -EINVAL;
-
- /* were we already at the bottom? */
- if (!mask)
- return -EINVAL;
-
- if (spd_limit) {
- if (mask & ((1 << spd_limit) - 1))
- mask &= (1 << spd_limit) - 1;
- else {
- bit = ffs(mask) - 1;
- mask = 1 << bit;
- }
- }
-
- link->sata_spd_limit = mask;
-
- ata_link_warn(link, "limiting SATA link speed to %s\n",
- sata_spd_string(fls(mask)));
-
- return 0;
-}
-
#ifdef CONFIG_ATA_ACPI
/**
* ata_timing_cycle2mode - find xfer mode for the specified cycle duration
@@ -3425,7 +3441,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct ata_eh_context *ehc = &dev->link->eh_context;
- const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
+ const bool nosetxfer = dev->quirks & ATA_QUIRK_NOSETXFER;
const char *dev_err_whine = "";
int ign_dev_err = 0;
unsigned int err_mask = 0;
@@ -3505,7 +3521,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
}
/**
- * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
@@ -3521,7 +3537,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
* 0 on success, negative errno otherwise
*/
-int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3602,7 +3618,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
*r_failed_dev = dev;
return rc;
}
-EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_set_mode);
/**
* ata_wait_ready - wait for link to become ready
@@ -3761,33 +3777,6 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
EXPORT_SYMBOL_GPL(ata_std_prereset);
/**
- * sata_std_hardreset - COMRESET w/o waiting or classification
- * @link: link to reset
- * @class: resulting class of attached device
- * @deadline: deadline jiffies for the operation
- *
- * Standard SATA COMRESET w/o waiting or classification.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 if link offline, -EAGAIN if link online, -errno on errors.
- */
-int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
- bool online;
- int rc;
-
- /* do hardreset */
- rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
- return online ? -EAGAIN : rc;
-}
-EXPORT_SYMBOL_GPL(sata_std_hardreset);
-
-/**
* ata_std_postreset - standard postreset callback
* @link: the target ata_link
* @classes: classes of attached devices
@@ -3878,7 +3867,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
unsigned int class = dev->class;
- u16 *id = (void *)dev->link->ap->sector_buf;
+ u16 *id = (void *)dev->sector_buf;
int rc;
/* read ID data */
@@ -3969,7 +3958,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
- !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
+ !(dev->quirks & ATA_QUIRK_BROKEN_HPA)) {
ata_dev_warn(dev,
"old n_sectors matches native, probably "
"late HPA lock, will try to unlock HPA\n");
@@ -3987,217 +3976,307 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
return rc;
}
-struct ata_blacklist_entry {
+static const char * const ata_quirk_names[] = {
+ [__ATA_QUIRK_DIAGNOSTIC] = "diagnostic",
+ [__ATA_QUIRK_NODMA] = "nodma",
+ [__ATA_QUIRK_NONCQ] = "noncq",
+ [__ATA_QUIRK_MAX_SEC_128] = "maxsec128",
+ [__ATA_QUIRK_BROKEN_HPA] = "brokenhpa",
+ [__ATA_QUIRK_DISABLE] = "disable",
+ [__ATA_QUIRK_HPA_SIZE] = "hpasize",
+ [__ATA_QUIRK_IVB] = "ivb",
+ [__ATA_QUIRK_STUCK_ERR] = "stuckerr",
+ [__ATA_QUIRK_BRIDGE_OK] = "bridgeok",
+ [__ATA_QUIRK_ATAPI_MOD16_DMA] = "atapimod16dma",
+ [__ATA_QUIRK_FIRMWARE_WARN] = "firmwarewarn",
+ [__ATA_QUIRK_1_5_GBPS] = "1.5gbps",
+ [__ATA_QUIRK_NOSETXFER] = "nosetxfer",
+ [__ATA_QUIRK_BROKEN_FPDMA_AA] = "brokenfpdmaaa",
+ [__ATA_QUIRK_DUMP_ID] = "dumpid",
+ [__ATA_QUIRK_MAX_SEC_LBA48] = "maxseclba48",
+ [__ATA_QUIRK_ATAPI_DMADIR] = "atapidmadir",
+ [__ATA_QUIRK_NO_NCQ_TRIM] = "noncqtrim",
+ [__ATA_QUIRK_NOLPM] = "nolpm",
+ [__ATA_QUIRK_WD_BROKEN_LPM] = "wdbrokenlpm",
+ [__ATA_QUIRK_ZERO_AFTER_TRIM] = "zeroaftertrim",
+ [__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
+ [__ATA_QUIRK_NOTRIM] = "notrim",
+ [__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_SEC_8191] = "maxsec8191",
+ [__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
+ [__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
+ [__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
+ [__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
+ [__ATA_QUIRK_NO_FUA] = "nofua",
+};
+
+static void ata_dev_print_quirks(const struct ata_device *dev,
+ const char *model, const char *rev,
+ unsigned int quirks)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int n = 0, i;
+ size_t sz;
+ char *str;
+
+ if (!ata_dev_print_info(dev) || ehc->i.flags & ATA_EHI_DID_PRINT_QUIRKS)
+ return;
+
+ ehc->i.flags |= ATA_EHI_DID_PRINT_QUIRKS;
+
+ if (!quirks)
+ return;
+
+ sz = 64 + ARRAY_SIZE(ata_quirk_names) * 16;
+ str = kmalloc(sz, GFP_KERNEL);
+ if (!str)
+ return;
+
+ n = snprintf(str, sz, "Model '%s', rev '%s', applying quirks:",
+ model, rev);
+
+ for (i = 0; i < ARRAY_SIZE(ata_quirk_names); i++) {
+ if (quirks & (1U << i))
+ n += snprintf(str + n, sz - n,
+ " %s", ata_quirk_names[i]);
+ }
+
+ ata_dev_warn(dev, "%s\n", str);
+
+ kfree(str);
+}
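Putting the pieces together, the one-shot warning built above renders as a
single line; model and revision here are illustrative:

/*
 * ata3.00: Model 'ExampleDisk', rev 'FW01', applying quirks: noncqtrim nolpm
 */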
+
+struct ata_dev_quirks_entry {
const char *model_num;
const char *model_rev;
- unsigned long horkage;
+ unsigned int quirks;
};
-static const struct ata_blacklist_entry ata_device_blacklist [] = {
+static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
/* Devices with DMA related problems under Linux */
- { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
- { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
- { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-84", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
- { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
- { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
- { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
- { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
- { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
- { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
- { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
- { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
- { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
- { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
- { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
- { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC11000H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC22100H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC32500H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC33100H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC31600H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC32100H", "24.09P07", ATA_QUIRK_NODMA },
+ { "WDC AC23200L", "21.10N21", ATA_QUIRK_NODMA },
+ { "Compaq CRD-8241B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-8400B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-848[02]B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-84", NULL, ATA_QUIRK_NODMA },
+ { "SanDisk SDP3B", NULL, ATA_QUIRK_NODMA },
+ { "SanDisk SDP3B-64", NULL, ATA_QUIRK_NODMA },
+ { "SANYO CD-ROM CRD", NULL, ATA_QUIRK_NODMA },
+ { "HITACHI CDR-8", NULL, ATA_QUIRK_NODMA },
+ { "HITACHI CDR-8[34]35", NULL, ATA_QUIRK_NODMA },
+ { "Toshiba CD-ROM XM-6202B", NULL, ATA_QUIRK_NODMA },
+ { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_QUIRK_NODMA },
+ { "CD-532E-A", NULL, ATA_QUIRK_NODMA },
+ { "E-IDE CD-ROM CR-840", NULL, ATA_QUIRK_NODMA },
+ { "CD-ROM Drive/F5A", NULL, ATA_QUIRK_NODMA },
+ { "WPI CDD-820", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SC-148C", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SC", NULL, ATA_QUIRK_NODMA },
+ { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_QUIRK_NODMA },
+ { "_NEC DV5800A", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SN-124", "N001", ATA_QUIRK_NODMA },
+ { "Seagate STT20000A", NULL, ATA_QUIRK_NODMA },
+ { " 2GB ATA Flash Disk", "ADMA428M", ATA_QUIRK_NODMA },
+ { "VRFDFC22048UCHC-TE*", NULL, ATA_QUIRK_NODMA },
/* Odd clown on sil3726/4726 PMPs */
- { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+ { "Config Disk", NULL, ATA_QUIRK_DISABLE },
/* Similar story with ASMedia 1092 */
- { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
+ { "ASMT109x- Config", NULL, ATA_QUIRK_DISABLE },
/* Weird ATAPI devices */
- { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
- { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
- { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
- { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_QUIRK_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_QUIRK_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 },
/*
* Causes silent data corruption with higher max sects.
* http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
*/
- { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ { "ST380013AS", "3.20", ATA_QUIRK_MAX_SEC_1024 },
/*
* These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
- { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
- { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ { "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
+
+ /*
+ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=220693
+ */
+ { "DELLBOSS VD", "MV.R00-0", ATA_QUIRK_MAX_SEC_8191 },
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
/* NCQ is slow */
- { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
+ { "WDC WD740ADFD-00", NULL, ATA_QUIRK_NONCQ },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_QUIRK_NONCQ },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
- { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ { "FUJITSU MHT2060BH", NULL, ATA_QUIRK_NONCQ },
/* NCQ is broken */
- { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
- { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
- { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
- { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
- { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
+ { "Maxtor *", "BANC*", ATA_QUIRK_NONCQ },
+ { "Maxtor 7V300F0", "VA111630", ATA_QUIRK_NONCQ },
+ { "ST380817AS", "3.42", ATA_QUIRK_NONCQ },
+ { "ST3160023AS", "3.42", ATA_QUIRK_NONCQ },
+ { "OCZ CORE_SSD", "02.10104", ATA_QUIRK_NONCQ },
/* Seagate NCQ + FLUSH CACHE firmware bug */
- { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640[36]23AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
- { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
- ATA_HORKAGE_NOLPM },
- { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", NULL, ATA_QUIRK_BROKEN_FPDMA_AA |
+ ATA_QUIRK_NOLPM },
+ { "VB0250EAVER", "HPG7", ATA_QUIRK_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
- { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
- { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
- { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
+ { "HTS541060G9SA00", "MB3OC60D", ATA_QUIRK_NONCQ },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_QUIRK_NONCQ },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_QUIRK_NONCQ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
- { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
+ { "C300-CTFDDAC128MAG", "0001", ATA_QUIRK_NONCQ },
/* Sandisk SD7/8/9s lock up hard on large trims */
- { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
+ { "SanDisk SD[789]*", NULL, ATA_QUIRK_MAX_TRIM_128M },
/* devices which puke on READ_NATIVE_MAX */
- { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
- { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
- { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
- { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+ { "HDS724040KLSA80", "KFAOA20N", ATA_QUIRK_BROKEN_HPA },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_QUIRK_BROKEN_HPA },
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_QUIRK_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_QUIRK_BROKEN_HPA },
/* this one allows HPA unlocking but fails IOs on the area */
- { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
+ { "OCZ-VERTEX", "1.30", ATA_QUIRK_BROKEN_HPA },
/* Devices which report 1 sector over size HPA */
- { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
- { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
- { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
+ { "ST340823A", NULL, ATA_QUIRK_HPA_SIZE },
+ { "ST320413A", NULL, ATA_QUIRK_HPA_SIZE },
+ { "ST310211A", NULL, ATA_QUIRK_HPA_SIZE },
/* Devices which get the IVB wrong */
- { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
- /* Maybe we should just blacklist TSSTcorp... */
- { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
+ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_QUIRK_IVB },
+ /* Maybe we should just add all TSSTcorp devices... */
+ { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_QUIRK_IVB },
/* Devices that do not need bridging limits applied */
- { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
- { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
+ { "MTRON MSP-SATA*", NULL, ATA_QUIRK_BRIDGE_OK },
+ { "BUFFALO HD-QSU2/R5", NULL, ATA_QUIRK_BRIDGE_OK },
/* Devices which aren't very happy with higher link speeds */
- { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
- { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
+ { "WD My Book", NULL, ATA_QUIRK_1_5_GBPS },
+ { "Seagate FreeAgent GoFlex", NULL, ATA_QUIRK_1_5_GBPS },
/*
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
- { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08A", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-215", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_QUIRK_NOSETXFER },
/* These specific Pioneer models have LPM issues */
- { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
- { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_QUIRK_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_QUIRK_NOLPM },
- /* Crucial BX100 SSD 500GB has broken LPM support */
- { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+ /* Crucial devices with broken LPM support */
+ { "CT*0BX*00SSD1", NULL, ATA_QUIRK_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
- { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT512MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* 512GB MX100 with newer firmware has only LPM issues */
- { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT512MX100*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
- { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
- { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT480M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
+ { "Crucial_CT960M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
+
+ /* AMD Radeon devices with broken LPM support */
+ { "R3SL240G", NULL, ATA_QUIRK_NOLPM },
+
+ /* Apacer models with LPM issues */
+ { "Apacer AS340*", NULL, ATA_QUIRK_NOLPM },
+
+ /* Silicon Motion models with LPM issues */
+ { "MD619HXCLDE3TC", "TCVAID", ATA_QUIRK_NOLPM },
+ { "MD619GXCLDE3TC", "TCV35D", ATA_QUIRK_NOLPM },
/* These specific Samsung models/firmware-revs do not handle LPM well */
- { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
- { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
- { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
- { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
+ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
+ { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_QUIRK_NOLPM },
+ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_QUIRK_NOLPM },
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_QUIRK_NOLPM },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_NO_DMA_LOG |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI, },
- { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron_M500IT_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_M500_*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_M5[15]0_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_1100_*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial_CT*M550*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial_CT*MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840 EVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_NO_DMA_LOG |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 850*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
+ { "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
+ { "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
+ { "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
/* devices that don't properly handle TRIM commands */
- { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
- { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
+ { "SuperSSpeed S238*", NULL, ATA_QUIRK_NOTRIM },
+ { "M88V29*", NULL, ATA_QUIRK_NOTRIM },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4217,14 +4296,14 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "INTEL*SSDSC2MH*", NULL, 0 },
- { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "INTEL*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SSD*INTEL*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SAMSUNG*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SAMSUNG*MZ7KM*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4235,62 +4314,66 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*
* https://bugzilla.kernel.org/show_bug.cgi?id=57211
*/
- { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD800JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD1200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD1600JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD2000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD2500JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD3000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD3200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
/*
* This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
* log page is accessed. Ensure we never ask for this log page with
* these devices.
*/
- { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+ { "SATADOM-ML 3ME", NULL, ATA_QUIRK_NO_LOG_DIR },
/* Buggy FUA */
- { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
- { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
- { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
- { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
+ { "Maxtor", "BANC1G10", ATA_QUIRK_NO_FUA },
+ { "WDC*WD2500J*", NULL, ATA_QUIRK_NO_FUA },
+ { "OCZ-VERTEX*", NULL, ATA_QUIRK_NO_FUA },
+ { "INTEL*SSDSC2CT*", NULL, ATA_QUIRK_NO_FUA },
/* End Marker */
{ }
};
-static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
+static unsigned int ata_dev_quirks(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
- const struct ata_blacklist_entry *ad = ata_device_blacklist;
+ const struct ata_dev_quirks_entry *ad = __ata_dev_quirks;
+
+ /* dev->quirks is an unsigned int. */
+ BUILD_BUG_ON(__ATA_QUIRK_MAX > 32);
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (glob_match(ad->model_num, model_num)) {
- if (ad->model_rev == NULL)
- return ad->horkage;
- if (glob_match(ad->model_rev, model_rev))
- return ad->horkage;
+ if (glob_match(ad->model_num, model_num) &&
+ (!ad->model_rev || glob_match(ad->model_rev, model_rev))) {
+ ata_dev_print_quirks(dev, model_num, model_rev,
+ ad->quirks);
+ return ad->quirks;
}
ad++;
}
return 0;
}
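The lookup above is a first-match scan: the device's model string (and, when the entry gives one, its firmware revision) is glob-matched against each table entry until one hits, and the empty end marker terminates the walk. A minimal user-space sketch of the same semantics, with a hypothetical table and made-up flag values, using fnmatch() in place of the kernel's glob_match():

/* Hypothetical user-space sketch of the first-match quirk lookup. */
#include <fnmatch.h>
#include <stdio.h>

struct quirk_entry {
	const char *model_num;	/* glob pattern */
	const char *model_rev;	/* NULL matches any firmware revision */
	unsigned int quirks;	/* made-up flag values, not ATA_QUIRK_* */
};

static const struct quirk_entry table[] = {
	{ "Samsung SSD 870*",	NULL,		0x1 },
	{ "Maxtor",		"BANC1G10",	0x2 },
	{ NULL }		/* end marker, as in the kernel table */
};

static unsigned int lookup_quirks(const char *model, const char *rev)
{
	const struct quirk_entry *ad = table;

	while (ad->model_num) {
		/* First entry whose model (and optional firmware) matches wins. */
		if (!fnmatch(ad->model_num, model, 0) &&
		    (!ad->model_rev || !fnmatch(ad->model_rev, rev, 0)))
			return ad->quirks;
		ad++;
	}
	return 0;
}

int main(void)
{
	printf("%#x\n", lookup_quirks("Samsung SSD 870 EVO 1TB", "SVT02B6Q"));
	return 0;
}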
-static int ata_dma_blacklisted(const struct ata_device *dev)
+static bool ata_dev_nodma(const struct ata_device *dev)
{
- /* We don't support polling DMA.
- * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
- * if the LLDD handles only interrupts in the HSM_ST_LAST state.
+ /*
+ * We do not support polling DMA. Deny DMA for those ATAPI devices
+ * with CDB-intr (and use PIO) if the LLDD handles only interrupts in
+ * the HSM_ST_LAST state.
*/
if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
(dev->flags & ATA_DFLAG_CDB_INTR))
- return 1;
- return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
+ return true;
+ return dev->quirks & ATA_QUIRK_NODMA;
}
/**
@@ -4303,7 +4386,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
static int ata_is_40wire(struct ata_device *dev)
{
- if (dev->horkage & ATA_HORKAGE_IVB)
+ if (dev->quirks & ATA_QUIRK_IVB)
return ata_drive_40wire_relaxed(dev->id);
return ata_drive_40wire(dev->id);
}
@@ -4365,8 +4448,7 @@ static int cable_is_40wire(struct ata_port *ap)
*
* Compute supported xfermask of @dev and store it in
* dev->*_mask. This function is responsible for applying all
- * known limits including host controller limits, device
- * blacklist, etc...
+ * known limits including host controller limits, device quirks, etc...
*
* LOCKING:
* None.
@@ -4398,10 +4480,10 @@ static void ata_dev_xfermask(struct ata_device *dev)
xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
}
- if (ata_dma_blacklisted(dev)) {
+ if (ata_dev_nodma(dev)) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_warn(dev,
- "device is on DMA blacklist, disabling DMA\n");
+ "device does not support DMA, disabling DMA\n");
}
if ((host->flags & ATA_HOST_SIMPLEX) &&
@@ -4542,7 +4624,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- ata_dev_dbg(dev, "init dev params \n");
+ ata_dev_dbg(dev, "init dev params\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
@@ -4582,9 +4664,9 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
/* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
* few ATAPI devices choke on such DMA requests.
*/
- if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
+ if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
- return 1;
+ return -EOPNOTSUPP;
if (ap->ops->check_atapi_dma)
return ap->ops->check_atapi_dma(qc);
@@ -4623,12 +4705,6 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
-{
- return AC_ERR_OK;
-}
-EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-
/**
* ata_sg_init - Associate command with scatter-gather table.
* @qc: Command to be associated
@@ -4756,8 +4832,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)))
+ return;
+
ap = qc->ap;
link = qc->dev->link;
@@ -4779,9 +4856,10 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
ap->excl_link == link))
ap->excl_link = NULL;
- /* atapi: mark qc as inactive to prevent the interrupt handler
- * from completing the command twice later, before the error handler
- * is called. (when rc != 0 and atapi request sense is needed)
+ /*
+ * Mark qc as inactive to prevent the port interrupt handler from
+ * completing the command twice later, before the error handler is
+ * called.
*/
qc->flags &= ~ATA_QCFLAG_ACTIVE;
ap->qc_active &= ~(1ULL << qc->tag);
@@ -4794,8 +4872,16 @@ static void fill_result_tf(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ /*
+ * rtf may already be filled (e.g. for successful NCQ commands).
+ * If that is the case, we have nothing to do.
+ */
+ if (qc->flags & ATA_QCFLAG_RTF_FILLED)
+ return;
+
qc->result_tf.flags = qc->tf.flags;
ap->ops->qc_fill_rtf(qc);
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
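The ATA_QCFLAG_RTF_FILLED guard makes fill_result_tf() safe to call from more than one completion path: the result taskfile is read from the hardware once and reused afterwards. A stand-alone sketch of the pattern, with hypothetical names:

/* Hypothetical sketch of the fill-once guard used by fill_result_tf(). */
#include <stdio.h>

#define QCFLAG_RTF_FILLED 0x1

struct fake_qc {
	unsigned int flags;
	int rtf;		/* stands in for the result taskfile */
};

static void fill_result(struct fake_qc *qc, int hw_regs)
{
	/* Already filled (e.g. by the NCQ success path): nothing to do. */
	if (qc->flags & QCFLAG_RTF_FILLED)
		return;

	qc->rtf = hw_regs;	/* read the device registers exactly once */
	qc->flags |= QCFLAG_RTF_FILLED;
}

int main(void)
{
	struct fake_qc qc = { 0 };

	fill_result(&qc, 42);
	fill_result(&qc, 99);	/* ignored: rtf stays 42 */
	printf("%d\n", qc.rtf);
	return 0;
}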
static void ata_verify_xfer(struct ata_queued_cmd *qc)
@@ -5007,10 +5093,13 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- trace_ata_qc_prep(qc);
- qc->err_mask |= ap->ops->qc_prep(qc);
- if (unlikely(qc->err_mask))
- goto err;
+ if (ap->ops->qc_prep) {
+ trace_ata_qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
+ }
+
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
@@ -5354,7 +5443,7 @@ void ata_dev_init(struct ata_device *dev)
*/
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_INIT_MASK;
- dev->horkage = 0;
+ dev->quirks = 0;
spin_unlock_irqrestore(ap->lock, flags);
memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
@@ -5449,6 +5538,7 @@ int sata_link_init_spd(struct ata_link *link)
struct ata_port *ata_port_alloc(struct ata_host *host)
{
struct ata_port *ap;
+ int id;
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
@@ -5456,8 +5546,12 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
ap->lock = &host->lock;
- ap->print_id = -1;
- ap->local_port_no = -1;
+ id = ida_alloc_min(&ata_ida, 1, GFP_KERNEL);
+ if (id < 0) {
+ kfree(ap);
+ return NULL;
+ }
+ ap->print_id = id;
ap->host = host;
ap->dev = host->dev;
@@ -5480,8 +5574,23 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
#endif
ata_sff_port_init(ap);
+ ata_force_pflags(ap);
+
return ap;
}
+EXPORT_SYMBOL_GPL(ata_port_alloc);
+
+void ata_port_free(struct ata_port *ap)
+{
+ if (!ap)
+ return;
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
+ ida_free(&ata_ida, ap->print_id);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_port_free);
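With print_id now allocated from an IDA, every ata_port_alloc() must be balanced by ata_port_free() on failure paths so the id is returned to the allocator. A hypothetical LLD error path (lld_init_port() is a made-up helper, not an API from this patch):

/* Hypothetical LLD sketch pairing ata_port_alloc()/ata_port_free(). */
static int lld_probe_one_port(struct ata_host *host)
{
	struct ata_port *ap;

	ap = ata_port_alloc(host);	/* allocates print_id from ata_ida */
	if (!ap)
		return -ENOMEM;

	if (lld_init_port(ap)) {	/* made-up setup helper */
		ata_port_free(ap);	/* returns print_id to the IDA */
		return -EIO;
	}

	return 0;
}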
static void ata_devres_release(struct device *gendev, void *res)
{
@@ -5509,12 +5618,7 @@ static void ata_host_release(struct kref *kref)
int i;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- kfree(ap->pmp_link);
- kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
- kfree(ap);
+ ata_port_free(host->ports[i]);
host->ports[i] = NULL;
}
kfree(host);
@@ -5534,24 +5638,19 @@ EXPORT_SYMBOL_GPL(ata_host_put);
/**
* ata_host_alloc - allocate and init basic ATA host resources
* @dev: generic device this host is associated with
- * @max_ports: maximum number of ATA ports associated with this host
+ * @n_ports: the number of ATA ports associated with this host
*
* Allocate and initialize basic ATA host resources. An LLD calls
* this function to allocate a host, initializes it fully, and then
* attaches it using ata_host_register().
*
- * @max_ports ports are allocated and host->n_ports is
- * initialized to @max_ports. The caller is allowed to decrease
- * host->n_ports before calling ata_host_register(). The unused
- * ports will be automatically freed on registration.
- *
* RETURNS:
* Allocated ATA host on success, NULL on failure.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*/
-struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+struct ata_host *ata_host_alloc(struct device *dev, int n_ports)
{
struct ata_host *host;
size_t sz;
@@ -5559,17 +5658,21 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
void *dr;
/* alloc a container for our list of ATA ports (buses) */
- sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+ sz = sizeof(struct ata_host) + n_ports * sizeof(void *);
host = kzalloc(sz, GFP_KERNEL);
if (!host)
return NULL;
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- goto err_free;
+ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
- if (!dr)
+ if (!dr) {
+ kfree(host);
goto err_out;
+ }
devres_add(dev, dr);
dev_set_drvdata(dev, host);
@@ -5577,11 +5680,11 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->dev = dev;
- host->n_ports = max_ports;
+ host->n_ports = n_ports;
kref_init(&host->kref);
/* allocate ports bound to this host */
- for (i = 0; i < max_ports; i++) {
+ for (i = 0; i < n_ports; i++) {
struct ata_port *ap;
ap = ata_port_alloc(host);
@@ -5597,8 +5700,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
err_out:
devres_release_group(dev, NULL);
- err_free:
- kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
@@ -5825,6 +5926,8 @@ void ata_port_probe(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
+ ata_acpi_port_power_on(ap);
+
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
@@ -5892,19 +5995,6 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
return -EINVAL;
}
- /* Blow away unused ports. This happens when LLD can't
- * determine the exact number of ports to allocate at
- * allocation time.
- */
- for (i = host->n_ports; host->ports[i]; i++)
- kfree(host->ports[i]);
-
- /* give ports names and add SCSI hosts */
- for (i = 0; i < host->n_ports; i++) {
- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
- host->ports[i]->local_port_no = i + 1;
- }
-
/* Create associated sysfs transport objects */
for (i = 0; i < host->n_ports; i++) {
rc = ata_tport_add(host->dev, host->ports[i]);
@@ -6026,6 +6116,23 @@ int ata_host_activate(struct ata_host *host, int irq,
EXPORT_SYMBOL_GPL(ata_host_activate);
/**
+ * ata_dev_free_resources - Free a device's resources
+ * @dev: Target ATA device
+ *
+ * Free the resources allocated to support a device's features.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_free_resources(struct ata_device *dev)
+{
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
+
+ ata_dev_cleanup_cdl_resources(dev);
+}
+
+/**
* ata_port_detach - Detach ATA port in preparation of device removal
* @ap: ATA port to be detached
*
@@ -6079,19 +6186,15 @@ static void ata_port_detach(struct ata_port *ap)
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
- /* clean up zpodd on port removal */
- ata_for_each_link(link, ap, HOST_FIRST) {
- ata_for_each_dev(dev, link, ALL) {
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
- }
- }
+ /* Delete port multiplier link transport devices */
if (ap->pmp_link) {
int i;
+
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_tlink_delete(&ap->pmp_link[i]);
}
- /* remove the associated SCSI host */
+
+ /* Remove the associated SCSI host */
scsi_remove_host(ap->scsi_host);
ata_tport_delete(ap);
}
@@ -6287,12 +6390,15 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
-#define force_horkage_on(name, flag) \
- { #name, .horkage_on = (flag) }
+#define force_pflag_on(name, flags) \
+ { #name, .pflags_on = (flags) }
+
+#define force_quirk_on(name, flag) \
+ { #name, .quirk_on = (flag) }
-#define force_horkage_onoff(name, flag) \
- { "no" #name, .horkage_on = (flag) }, \
- { #name, .horkage_off = (flag) }
+#define force_quirk_onoff(name, flag) \
+ { "no" #name, .quirk_on = (flag) }, \
+ { #name, .quirk_off = (flag) }
static const struct ata_force_param force_tbl[] __initconst = {
force_cbl(40c, ATA_CBL_PATA40),
@@ -6346,32 +6452,34 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
- force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
- force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
- force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
+ force_pflag_on(external, ATA_PFLAG_EXTERNAL),
- force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
- force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
- force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
+ force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
+ force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
+ force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
- force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
- force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
- force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
+ force_quirk_onoff(trim, ATA_QUIRK_NOTRIM),
+ force_quirk_on(trim_zero, ATA_QUIRK_ZERO_AFTER_TRIM),
+ force_quirk_on(max_trim_128m, ATA_QUIRK_MAX_TRIM_128M),
- force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
- force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
- force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
+ force_quirk_onoff(dma, ATA_QUIRK_NODMA),
+ force_quirk_on(atapi_dmadir, ATA_QUIRK_ATAPI_DMADIR),
+ force_quirk_on(atapi_mod16_dma, ATA_QUIRK_ATAPI_MOD16_DMA),
- force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
- force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
- force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
+ force_quirk_onoff(dmalog, ATA_QUIRK_NO_DMA_LOG),
+ force_quirk_onoff(iddevlog, ATA_QUIRK_NO_ID_DEV_LOG),
+ force_quirk_onoff(logdir, ATA_QUIRK_NO_LOG_DIR),
- force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
- force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
- force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
- force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),
+ force_quirk_on(max_sec_128, ATA_QUIRK_MAX_SEC_128),
+ force_quirk_on(max_sec_1024, ATA_QUIRK_MAX_SEC_1024),
+ force_quirk_on(max_sec_lba48, ATA_QUIRK_MAX_SEC_LBA48),
- force_horkage_on(disable, ATA_HORKAGE_DISABLE),
+ force_quirk_onoff(lpm, ATA_QUIRK_NOLPM),
+ force_quirk_onoff(setxfer, ATA_QUIRK_NOSETXFER),
+ force_quirk_on(dump_id, ATA_QUIRK_DUMP_ID),
+ force_quirk_onoff(fua, ATA_QUIRK_NO_FUA),
+
+ force_quirk_on(disable, ATA_QUIRK_DISABLE),
};
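Each force_quirk_onoff() use expands into a "no<name>"/"<name>" pair, so "libata.force=noncq" sets ATA_QUIRK_NONCQ while "libata.force=ncq" clears it. A user-space sketch of the expansion, with made-up flag values:

/* User-space sketch of the force_quirk_onoff() macro expansion. */
#include <stdio.h>

struct force_param {
	const char *name;
	unsigned int quirk_on;
	unsigned int quirk_off;
};

#define FORCE_QUIRK_ONOFF(n, flag) \
	{ "no" #n, .quirk_on = (flag) }, \
	{ #n, .quirk_off = (flag) }

static const struct force_param tbl[] = {
	FORCE_QUIRK_ONOFF(ncq, 0x1),	/* "noncq" sets, "ncq" clears */
	FORCE_QUIRK_ONOFF(trim, 0x2),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("%-8s on=%#x off=%#x\n", tbl[i].name,
		       tbl[i].quirk_on, tbl[i].quirk_off);
	return 0;
}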
static int __init ata_parse_force_one(char **cur,
@@ -6647,7 +6755,6 @@ static void ata_dummy_error_handler(struct ata_port *ap)
}
struct ata_port_operations ata_dummy_port_ops = {
- .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_dummy_qc_issue,
.error_handler = ata_dummy_error_handler,
.sched_eh = ata_std_sched_eh,
@@ -6660,12 +6767,6 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-void ata_print_version(const struct device *dev, const char *version)
-{
- dev_printk(KERN_DEBUG, dev, "version %s\n", version);
-}
-EXPORT_SYMBOL(ata_print_version);
-
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 214b935c2ced..2586e77ebf45 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -153,8 +153,6 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -500,10 +498,13 @@ static void ata_eh_dev_disable(struct ata_device *dev)
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
- /* From now till the next successful probe, ering is used to
+ /*
+ * From now till the next successful probe, ering is used to
* track probe failures. Clear accumulated device error info.
*/
ata_ering_clear(&dev->ering);
+
+ ata_dev_free_resources(dev);
}
static void ata_eh_unload(struct ata_port *ap)
@@ -630,6 +631,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
+ /*
+ * If the scmd was added to EH, via ata_qc_schedule_eh() ->
+ * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
+ * have set DID_TIME_OUT (since libata does not have an abort
+ * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
+ */
+ set_host_byte(scmd, DID_OK);
+
ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
@@ -640,6 +649,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
/* the scmd has an associated qc */
if (!(qc->flags & ATA_QCFLAG_EH)) {
/* which hasn't failed yet, timeout */
+ set_host_byte(scmd, DID_TIME_OUT);
qc->err_mask |= AC_ERR_TIMEOUT;
qc->flags |= ATA_QCFLAG_EH;
nr_timedout++;
@@ -688,7 +698,7 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ata_eh_acquire(ap);
repeat:
/* kill fast drain timer */
- del_timer_sync(&ap->fastdrain_timer);
+ timer_delete_sync(&ap->fastdrain_timer);
/* process port resume request */
ata_eh_handle_port_resume(ap);
@@ -813,7 +823,7 @@ void ata_port_wait_eh(struct ata_port *ap)
retry:
spin_lock_irqsave(ap->lock, flags);
- while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
+ while (ata_port_eh_scheduled(ap)) {
prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(ap->lock, flags);
schedule();
@@ -848,7 +858,7 @@ static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
- struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
+ struct ata_port *ap = timer_container_of(ap, t, fastdrain_timer);
unsigned long flags;
unsigned int cnt;
@@ -897,7 +907,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+static void ata_eh_set_pending(struct ata_port *ap, bool fastdrain)
{
unsigned int cnt;
@@ -937,7 +947,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
qc->flags |= ATA_QCFLAG_EH;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
@@ -959,7 +969,7 @@ void ata_std_sched_eh(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
scsi_schedule_eh(ap->scsi_host);
trace_ata_std_sched_eh(ap);
@@ -1010,7 +1020,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
int tag, nr_aborted = 0;
/* we're gonna abort all commands, no need for fast drain */
- ata_eh_set_pending(ap, 0);
+ ata_eh_set_pending(ap, false);
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
@@ -1402,6 +1412,43 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
+ * ata_eh_decide_disposition - Disposition a qc based on sense data
+ * @qc: qc to examine
+ *
+ * For a regular SCSI command, the SCSI completion callback (scsi_done())
+ * will call scsi_complete(), which will call scsi_decide_disposition(),
+ * which will call scsi_check_sense(). scsi_complete() finally calls
+ * scsi_finish_command(). This is fine for SCSI, since any eventual sense
+ * data is usually returned in the completion itself (without invoking SCSI
+ * EH). However, for a QC, we always need to fetch the sense data
+ * explicitly using SCSI EH.
+ *
+ * A command that is completed via SCSI EH will instead be completed using
+ * scsi_eh_flush_done_q(), which will call scsi_finish_command() directly
+ * (without ever calling scsi_check_sense()).
+ *
+ * For a command that went through SCSI EH, it is the responsibility of the
+ * SCSI EH strategy handler to call scsi_decide_disposition(), see e.g. how
+ * scsi_eh_get_sense() calls scsi_decide_disposition() for SCSI LLDDs that
+ * do not get the sense data as part of the completion.
+ *
+ * Thus, for QC commands that went via SCSI EH, we need to call
+ * scsi_check_sense() ourselves, similar to how scsi_eh_get_sense() calls
+ * scsi_decide_disposition(), which calls scsi_check_sense(), in order to
+ * set the correct SCSI ML byte (if any).
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
+ */
+enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc)
+{
+ return scsi_check_sense(qc->scsicmd);
+}
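A caller of ata_eh_decide_disposition() sees the same enum scsi_disposition values the SCSI midlayer uses; the handling below is only an illustrative sketch, not a path taken verbatim in this patch:

/* Illustrative consumer of the disposition; not code from this patch. */
static void example_handle_sense(struct ata_queued_cmd *qc)
{
	switch (ata_eh_decide_disposition(qc)) {
	case SUCCESS:
		/* Sense evaluated; command can go to the upper layers. */
		break;
	case NEEDS_RETRY:
	case ADD_TO_MLQUEUE:
		/* SCSI ML byte set so the midlayer will retry/requeue. */
		break;
	default:
		/* FAILED: leave the qc to the normal EH recovery path. */
		break;
	}
}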
+
+/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
* @qc: qc to perform REQUEST_SENSE_DATA_EXT to
*
@@ -1493,8 +1540,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
- /* is it pointless to prefer PIO for "safety reasons"? */
- if (ap->flags & ATA_FLAG_PIO_DMA) {
+ /*
+ * Do not use DMA if the connected device only supports PIO, even if the
+ * port prefers PIO commands via DMA.
+ *
+ * Ideally, we should call atapi_check_dma() to check if it is safe for
+ * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
+ * Since we cannot check the command, perhaps we should only use PIO?
+ */
+ if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {
@@ -1627,7 +1681,8 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
}
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
+ enum scsi_disposition ret = ata_eh_decide_disposition(qc);
+
/*
* SUCCESS here means that the sense code could be
* evaluated and should be passed to the upper layers
@@ -1924,7 +1979,7 @@ static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
return qc->flags & ATA_QCFLAG_QUIET;
}
-static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
+static int ata_eh_get_non_ncq_success_sense(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
@@ -1942,11 +1997,10 @@ static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
return -EIO;
/*
- * If we have sense data, call scsi_check_sense() in order to set the
- * correct SCSI ML byte (if any). No point in checking the return value,
- * since the command has already completed successfully.
+ * No point in checking the return value, since the command has already
+ * completed successfully.
*/
- scsi_check_sense(qc->scsicmd);
+ ata_eh_decide_disposition(qc);
return 0;
}
@@ -1976,9 +2030,9 @@ static void ata_eh_get_success_sense(struct ata_link *link)
* request sense ext command to retrieve the sense data.
*/
if (link->sactive)
- ret = ata_eh_read_sense_success_ncq_log(link);
+ ret = ata_eh_get_ncq_success_sense(link);
else
- ret = ata_eh_read_sense_success_non_ncq(link);
+ ret = ata_eh_get_non_ncq_success_sense(link);
if (ret)
goto out;
@@ -2017,6 +2071,188 @@ out:
ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
}
+/*
+ * Check if a link is established. This is a relaxed version of
+ * ata_phys_link_online() which accounts for the fact that this is potentially
+ * called after changing the link power management policy, which may not be
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
+ * the PHY in the Partial, Slumber or DevSleep power management state).
+ * So check that:
+ * - A device is still present, that is, DET is 1h (Device presence detected
+ * but Phy communication not established) or 3h (Device presence detected and
+ * Phy communication established)
+ * - Communication is established, that is, IPM is not 0h, indicating that
+ * the PHY is online or in a low power state.
+ */
+static bool ata_eh_link_established(struct ata_link *link)
+{
+ u32 sstatus;
+ u8 det, ipm;
+
+ /*
+ * For old IDE/PATA adapters that do not have a valid scr_read method,
+ * or if reading the SStatus register fails, assume that the device is
+ * present. Device probe will determine if that is really the case.
+ */
+ if (sata_scr_read(link, SCR_STATUS, &sstatus))
+ return true;
+
+ det = sstatus & 0x0f;
+ ipm = (sstatus >> 8) & 0x0f;
+
+ return (det & 0x01) && ipm;
+}
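SStatus packs DET in bits 3:0 and IPM in bits 11:8, so e.g. 0x123 (active PHY) and 0x613 (Slumber) both count as established while 0x000 does not. A user-space sketch of the decode (register values are examples only):

/* User-space sketch of the SStatus decode; values are examples. */
#include <stdbool.h>
#include <stdio.h>

static bool link_established(unsigned int sstatus)
{
	unsigned int det = sstatus & 0x0f;		/* bits 3:0 */
	unsigned int ipm = (sstatus >> 8) & 0x0f;	/* bits 11:8 */

	/* DET 1h or 3h: device detected; IPM != 0h: PHY not offline. */
	return (det & 0x01) && ipm;
}

int main(void)
{
	printf("%d\n", link_established(0x123));	/* active: 1 */
	printf("%d\n", link_established(0x613));	/* slumber: 1 */
	printf("%d\n", link_established(0x000));	/* empty: 0 */
	return 0;
}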
+
+/**
+ * ata_eh_link_set_lpm - configure SATA interface power management
+ * @link: link to configure
+ * @policy: the link power management policy
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Enable SATA Interface power management. This will enable
+ * Device Interface Power Management (DIPM) for min_power and
+ * medium_power_with_dipm policies, and then call driver specific
+ * callbacks for enabling Host Initiated Power management.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int ata_eh_link_set_lpm(struct ata_link *link,
+ enum ata_lpm_policy policy,
+ struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
+ enum ata_lpm_policy old_policy = link->lpm_policy;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
+ unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
+ unsigned int err_mask;
+ int rc;
+
+ /* if the link or host doesn't do LPM, noop */
+ if (!IS_ENABLED(CONFIG_SATA_HOST) ||
+ (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+ return 0;
+
+ /*
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ ata_link_dbg(link, "Set LPM policy: %d -> %d\n", old_policy, policy);
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ /* find the first enabled and LPM enabled devices */
+ if (!link_dev)
+ link_dev = dev;
+
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
+ lpm_dev = dev;
+
+ hints &= ~ATA_LPM_EMPTY;
+ if (!dev_has_hipm)
+ hints &= ~ATA_LPM_HIPM;
+
+ /* disable DIPM before changing link config */
+ if (dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to disable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ if (ap) {
+ rc = ap->ops->set_lpm(link, policy, hints);
+ if (!rc && ap->slave_link)
+ rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
+ } else
+ rc = sata_pmp_set_lpm(link, policy, hints);
+
+ /*
+ * Attribute link config failure to the first (LPM) enabled
+ * device on the link.
+ */
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ link->flags |= ATA_LFLAG_NO_LPM;
+ return 0;
+ }
+ dev = lpm_dev ? lpm_dev : link_dev;
+ goto fail;
+ }
+
+ /*
+ * Low level driver acked the transition. Issue DIPM command
+ * with the new policy set.
+ */
+ link->lpm_policy = policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = policy;
+
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to enable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ link->last_lpm_change = jiffies;
+ link->flags |= ATA_LFLAG_CHANGED;
+
+ return 0;
+
+fail:
+ /* restore the old policy */
+ link->lpm_policy = old_policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = old_policy;
+
+ /* if no device or only one more chance is left, disable LPM */
+ if (!dev || ehc->tries[dev->devno] <= 2) {
+ ata_link_warn(link, "disabling LPM on the link\n");
+ link->flags |= ATA_LFLAG_NO_LPM;
+ }
+ if (r_failed_dev)
+ *r_failed_dev = dev;
+ return rc;
+}
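The ">=" comparisons above rely on enum ata_lpm_policy being ordered from max_power down to min_power, so every policy at or beyond ATA_LPM_MED_POWER_WITH_DIPM wants DIPM. A small sketch of that ordering assumption (values mirror the libata enum, with simplified names):

/* Sketch of the policy ordering the DIPM checks rely on. */
#include <stdio.h>

enum lpm_policy_sketch {		/* mirrors enum ata_lpm_policy */
	LPM_UNKNOWN,
	LPM_MAX_POWER,
	LPM_MED_POWER,
	LPM_MED_POWER_WITH_DIPM,	/* DIPM wanted from here on */
	LPM_MIN_POWER_WITH_PARTIAL,
	LPM_MIN_POWER,
};

static int dipm_wanted(enum lpm_policy_sketch policy, int host_has_dipm,
		       int dev_has_dipm)
{
	return policy >= LPM_MED_POWER_WITH_DIPM &&
	       host_has_dipm && dev_has_dipm;
}

int main(void)
{
	printf("%d\n", dipm_wanted(LPM_MED_POWER, 1, 1));	/* 0 */
	printf("%d\n", dipm_wanted(LPM_MIN_POWER, 1, 1));	/* 1 */
	return 0;
}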
+
/**
* ata_eh_link_autopsy - analyze error and determine recovery action
* @link: host link to perform autopsy on
@@ -2550,25 +2786,28 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
return reset(link, classes, deadline);
}
-static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
+static bool ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
- return 0;
+ return false;
if (rc == -EAGAIN)
- return 1;
+ return true;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
- return 1;
- return 0;
+ return true;
+ return false;
}
int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_port *ap = link->ap;
struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
+ ata_reset_fn_t hardreset = reset_ops->hardreset;
+ ata_reset_fn_t softreset = reset_ops->softreset;
+ ata_prereset_fn_t prereset = reset_ops->prereset;
+ ata_postreset_fn_t postreset = reset_ops->postreset;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
@@ -3067,13 +3306,13 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* to ap->target_lpm_policy after revalidation is done.
*/
if (link->lpm_policy > ATA_LPM_MAX_POWER) {
- rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
- r_failed_dev);
+ rc = ata_eh_link_set_lpm(link, ATA_LPM_MAX_POWER,
+ r_failed_dev);
if (rc)
goto err;
}
- if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ if (!ata_eh_link_established(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
}
@@ -3177,12 +3416,12 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
}
/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_eh_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
* Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
+ * ata_eh_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
@@ -3191,7 +3430,8 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* RETURNS:
* 0 on success, negative errno otherwise
*/
-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+static int ata_eh_set_mode(struct ata_link *link,
+ struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3212,7 +3452,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
if (ap->ops->set_mode)
rc = ap->ops->set_mode(link, r_failed_dev);
else
- rc = ata_do_set_mode(link, r_failed_dev);
+ rc = ata_set_mode(link, r_failed_dev);
/* if transfer mode has changed, set DUBIOUS_XFER on device */
ata_for_each_dev(dev, link, ENABLED) {
@@ -3247,7 +3487,7 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
int i;
for (i = 0; i < ATA_EH_UA_TRIES; i++) {
- u8 *sense_buffer = dev->link->ap->sector_buf;
+ u8 *sense_buffer = dev->sector_buf;
u8 sense_key = 0;
unsigned int err_mask;
@@ -3352,140 +3592,6 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
return rc;
}
-/**
- * ata_eh_set_lpm - configure SATA interface power management
- * @link: link to configure power management
- * @policy: the link power management policy
- * @r_failed_dev: out parameter for failed device
- *
- * Enable SATA Interface power management. This will enable
- * Device Interface Power Management (DIPM) for min_power and
- * medium_power_with_dipm policies, and then call driver specific
- * callbacks for enabling Host Initiated Power management.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev)
-{
- struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
- enum ata_lpm_policy old_policy = link->lpm_policy;
- bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
- unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
- unsigned int err_mask;
- int rc;
-
- /* if the link or host doesn't do LPM, noop */
- if (!IS_ENABLED(CONFIG_SATA_HOST) ||
- (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
- return 0;
-
- /*
- * DIPM is enabled only for MIN_POWER as some devices
- * misbehave when the host NACKs transition to SLUMBER. Order
- * device and link configurations such that the host always
- * allows DIPM requests.
- */
- ata_for_each_dev(dev, link, ENABLED) {
- bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
-
- /* find the first enabled and LPM enabled devices */
- if (!link_dev)
- link_dev = dev;
-
- if (!lpm_dev && (hipm || dipm))
- lpm_dev = dev;
-
- hints &= ~ATA_LPM_EMPTY;
- if (!hipm)
- hints &= ~ATA_LPM_HIPM;
-
- /* disable DIPM before changing link config */
- if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_DISABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to disable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- if (ap) {
- rc = ap->ops->set_lpm(link, policy, hints);
- if (!rc && ap->slave_link)
- rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
- } else
- rc = sata_pmp_set_lpm(link, policy, hints);
-
- /*
- * Attribute link config failure to the first (LPM) enabled
- * device on the link.
- */
- if (rc) {
- if (rc == -EOPNOTSUPP) {
- link->flags |= ATA_LFLAG_NO_LPM;
- return 0;
- }
- dev = lpm_dev ? lpm_dev : link_dev;
- goto fail;
- }
-
- /*
- * Low level driver acked the transition. Issue DIPM command
- * with the new policy set.
- */
- link->lpm_policy = policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = policy;
-
- /* host config updated, enable DIPM if transitioning to MIN_POWER */
- ata_for_each_dev(dev, link, ENABLED) {
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
- ata_id_has_dipm(dev->id)) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_ENABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to enable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- link->last_lpm_change = jiffies;
- link->flags |= ATA_LFLAG_CHANGED;
-
- return 0;
-
-fail:
- /* restore the old policy */
- link->lpm_policy = old_policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = old_policy;
-
- /* if no device or only one more chance is left, disable LPM */
- if (!dev || ehc->tries[dev->devno] <= 2) {
- ata_link_warn(link, "disabling LPM on the link\n");
- link->flags |= ATA_LFLAG_NO_LPM;
- }
- if (r_failed_dev)
- *r_failed_dev = dev;
- return rc;
-}
-
int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
@@ -3658,10 +3764,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/**
* ata_eh_recover - recover host port after error
* @ap: host port to recover
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
* @r_failed_link: out parameter for failed link
*
* This is the alpha and omega, yin and yang, heart and soul of
@@ -3677,9 +3780,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+int ata_eh_recover(struct ata_port *ap, struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_link)
{
struct ata_link *link;
@@ -3747,8 +3848,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (!(ehc->i.action & ATA_EH_RESET))
continue;
- rc = ata_eh_reset(link, ata_link_nr_vacant(link),
- prereset, softreset, hardreset, postreset);
+ rc = ata_eh_reset(link, ata_link_nr_vacant(link), reset_ops);
if (rc) {
ata_link_err(link, "reset failed, giving up\n");
goto out;
@@ -3829,7 +3929,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
/* configure transfer mode if necessary */
if (ehc->i.flags & ATA_EHI_SETMODE) {
- rc = ata_set_mode(link, &dev);
+ rc = ata_eh_set_mode(link, &dev);
if (rc)
goto rest_fail;
ehc->i.flags &= ~ATA_EHI_SETMODE;
@@ -3874,7 +3974,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
config_lpm:
/* configure link power saving */
if (link->lpm_policy != ap->target_lpm_policy) {
- rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
+ rc = ata_eh_link_set_lpm(link, ap->target_lpm_policy,
+ &dev);
if (rc)
goto rest_fail;
}
@@ -3968,59 +4069,39 @@ void ata_eh_finish(struct ata_port *ap)
}
/**
- * ata_do_eh - do standard error handling
+ * ata_std_error_handler - standard error handler
* @ap: host port to handle error for
*
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
- *
* Perform standard error handling sequence.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset)
+void ata_std_error_handler(struct ata_port *ap)
{
- struct ata_device *dev;
+ struct ata_reset_operations *reset_ops = &ap->ops->reset;
+ struct ata_link *link = &ap->link;
int rc;
+ /* Ignore built-in hardresets if SCR access is not available */
+ if ((reset_ops->hardreset == sata_std_hardreset ||
+ reset_ops->hardreset == sata_sff_hardreset) &&
+ !sata_scr_valid(link))
+ link->flags |= ATA_LFLAG_NO_HRST;
+
ata_eh_autopsy(ap);
ata_eh_report(ap);
- rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
- NULL);
+ rc = ata_eh_recover(ap, reset_ops, NULL);
if (rc) {
- ata_for_each_dev(dev, &ap->link, ALL)
+ struct ata_device *dev;
+
+ ata_for_each_dev(dev, link, ALL)
ata_dev_disable(dev);
}
ata_eh_finish(ap);
}
-
-/**
- * ata_std_error_handler - standard error handler
- * @ap: host port to handle error for
- *
- * Standard error handler
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- */
-void ata_std_error_handler(struct ata_port *ap)
-{
- struct ata_port_operations *ops = ap->ops;
- ata_reset_fn_t hardreset = ops->hardreset;
-
- /* ignore built-in hardreset if SCR access is not available */
- if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
-}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
#ifdef CONFIG_PM
@@ -4051,10 +4132,20 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
- /* Set all devices attached to the port in standby mode */
- ata_for_each_link(link, ap, HOST_FIRST) {
- ata_for_each_dev(dev, link, ENABLED)
- ata_dev_power_set_standby(dev);
+ /*
+ * We will reach this point for all of the PM events:
+ * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set)
+ * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
+ *
+ * We do not want to perform disk spin down for PM_EVENT_FREEZE.
+ * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
+ */
+ if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
+ /* Set all devices attached to the port in standby mode */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
}
/*
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index e2e9cbd405fa..57023324a56f 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -15,9 +15,9 @@
const struct ata_port_operations sata_pmp_port_ops = {
.inherits = &sata_port_ops,
- .pmp_prereset = ata_std_prereset,
- .pmp_hardreset = sata_std_hardreset,
- .pmp_postreset = ata_std_postreset,
+ .pmp_reset.prereset = ata_std_prereset,
+ .pmp_reset.hardreset = sata_std_hardreset,
+ .pmp_reset.postreset = ata_std_postreset,
.error_handler = sata_pmp_error_handler,
};
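Host-side drivers initialize the grouped reset callbacks the same way the PMP ops above initialize .pmp_reset.*; a hypothetical LLD ops table using the new layout:

/* Hypothetical LLD port_ops using the grouped reset callbacks. */
static const struct ata_port_operations lld_port_ops = {
	.inherits	 = &sata_port_ops,

	.reset.prereset	 = ata_std_prereset,
	.reset.hardreset = sata_std_hardreset,
	.reset.postreset = ata_std_postreset,
};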
@@ -648,8 +648,7 @@ static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
{
struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- u32 *gscr = (void *)ap->sector_buf;
+ u32 *gscr = (void *)dev->sector_buf;
int rc;
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
@@ -728,10 +727,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
/**
* sata_pmp_eh_recover_pmp - recover PMP
* @ap: ATA port PMP is attached to
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method
- * @hardreset: hardreset method
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
*
* Recover PMP attached to @ap. Recovery procedure is somewhat
* similar to that of ata_eh_recover() except that reset should
@@ -745,8 +741,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
* 0 on success, -errno on failure.
*/
static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_link *link = &ap->link;
struct ata_eh_context *ehc = &link->eh_context;
@@ -768,8 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
struct ata_link *tlink;
/* reset */
- rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
- postreset);
+ rc = ata_eh_reset(link, 0, reset_ops);
if (rc) {
ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
@@ -933,8 +927,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
retry:
/* PMP attached? */
if (!sata_pmp_attached(ap)) {
- rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset, NULL);
+ rc = ata_eh_recover(ap, &ops->reset, NULL);
if (rc) {
ata_for_each_dev(dev, &ap->link, ALL)
ata_dev_disable(dev);
@@ -952,8 +945,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
}
/* recover pmp */
- rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset);
+ rc = sata_pmp_eh_recover_pmp(ap, &ops->reset);
if (rc)
goto pmp_fail;
@@ -979,8 +971,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
goto pmp_fail;
/* recover links */
- rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
- ops->pmp_hardreset, ops->pmp_postreset, &link);
+ rc = ata_eh_recover(ap, &ops->pmp_reset, &link);
if (rc)
goto link_fail;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 9e047bf912b1..b2817a2995d6 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -13,7 +13,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <linux/libata.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "libata.h"
#include "libata-transport.h"
@@ -518,6 +518,86 @@ int sata_set_spd(struct ata_link *link)
EXPORT_SYMBOL_GPL(sata_set_spd);
/**
+ * sata_down_spd_limit - adjust SATA spd limit downward
+ * @link: Link to adjust SATA spd limit for
+ * @spd_limit: Additional limit
+ *
+ * Adjust SATA spd limit of @link downward. Note that this
+ * function only adjusts the limit. The change must be applied
+ * using sata_set_spd().
+ *
+ * If @spd_limit is non-zero, the speed is limited to a value equal
+ * to or lower than @spd_limit, if such a speed is supported. If
+ * @spd_limit is slower than any supported speed, only the lowest
+ * supported speed is allowed.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+ u32 sstatus, spd, mask;
+ int rc, bit;
+
+ if (!sata_scr_valid(link))
+ return -EOPNOTSUPP;
+
+ /* If SCR can be read, use it to determine the current SPD.
+ * If not, use cached value in link->sata_spd.
+ */
+ rc = sata_scr_read(link, SCR_STATUS, &sstatus);
+ if (rc == 0 && ata_sstatus_online(sstatus))
+ spd = (sstatus >> 4) & 0xf;
+ else
+ spd = link->sata_spd;
+
+ mask = link->sata_spd_limit;
+ if (mask <= 1)
+ return -EINVAL;
+
+ /* unconditionally mask off the highest bit */
+ bit = fls(mask) - 1;
+ mask &= ~(1 << bit);
+
+ /*
+ * Mask off all speeds higher than or equal to the current one. At
+ * this point, if current SPD is not available and we previously
+ * recorded the link speed from SStatus, the driver has already
+ * masked off the highest bit so mask should already be 1 or 0.
+ * Otherwise, we should not force 1.5Gbps on a link where we have
+ * not previously recorded speed from SStatus. Just return in this
+ * case.
+ */
+ if (spd > 1)
+ mask &= (1 << (spd - 1)) - 1;
+ else if (link->sata_spd)
+ return -EINVAL;
+
+ /* were we already at the bottom? */
+ if (!mask)
+ return -EINVAL;
+
+ if (spd_limit) {
+ if (mask & ((1 << spd_limit) - 1))
+ mask &= (1 << spd_limit) - 1;
+ else {
+ bit = ffs(mask) - 1;
+ mask = 1 << bit;
+ }
+ }
+
+ link->sata_spd_limit = mask;
+
+ ata_link_warn(link, "limiting SATA link speed to %s\n",
+ sata_spd_string(fls(mask)));
+
+ return 0;
+}
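Worked example of the mask arithmetic: with gen1..gen3 supported (mask 0x7) and the link currently at 6.0 Gbps (spd 3), dropping the highest bit and everything at or above the current speed leaves 0x3, i.e. a gen2 (3.0 Gbps) limit. A user-space walk-through (fls_u32() is a stand-in for the kernel's fls()):

/* User-space walk-through of the sata_down_spd_limit() mask math. */
#include <stdio.h>

static int fls_u32(unsigned int x)	/* stand-in for the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int mask = 0x7;	/* gen1..gen3 allowed */
	unsigned int spd = 3;		/* currently at 6.0 Gbps */

	mask &= ~(1u << (fls_u32(mask) - 1));	/* drop highest bit: 0x3 */
	if (spd > 1)
		mask &= (1u << (spd - 1)) - 1;	/* drop >= current: 0x3 */

	printf("new limit mask %#x -> gen%d\n", mask, fls_u32(mask));
	return 0;
}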
+
+/**
* sata_link_hardreset - reset link via SATA phy reset
* @link: link to reset
* @timing: timing parameters { interval, duration, timeout } in msec
@@ -627,6 +707,34 @@ int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
EXPORT_SYMBOL_GPL(sata_link_hardreset);
/**
+ * sata_std_hardreset - COMRESET w/o waiting or classification
+ * @link: link to reset
+ * @class: resulting class of attached device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Standard SATA COMRESET w/o waiting or classification.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 if link offline, -EAGAIN if link online, -errno on errors.
+ */
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
+ bool online;
+ int rc;
+
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+ if (online)
+ return -EAGAIN;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
+
+/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
@@ -792,14 +900,52 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
+/*
+ * Check if a port supports link power management.
+ * Must be called with the port locked.
+ */
+static bool ata_scsi_lpm_supported(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ return false;
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, link, ENABLED) {
+ if (dev->quirks & ATA_QUIRK_NOLPM)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ unsigned long flags;
+ bool supported;
+
+ spin_lock_irqsave(ap->lock, flags);
+ supported = ata_scsi_lpm_supported(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return sysfs_emit(buf, "%d\n", supported);
+}
+DEVICE_ATTR(link_power_management_supported, S_IRUGO,
+ ata_scsi_lpm_supported_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
+
static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
- struct ata_link *link;
- struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -816,13 +962,9 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
- ata_for_each_link(link, ap, EDGE) {
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
- count = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
+ if (!ata_scsi_lpm_supported(ap)) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
}
ap->target_lpm_policy = policy;
@@ -1205,56 +1347,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
- * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
- * @host: ATA host container for all SAS ports
- * @port_info: Information from low-level host driver
- * @shost: SCSI host that the scsi device is attached to
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * ata_port pointer on success / NULL on failure.
- */
-
-struct ata_port *ata_sas_port_alloc(struct ata_host *host,
- struct ata_port_info *port_info,
- struct Scsi_Host *shost)
-{
- struct ata_port *ap;
-
- ap = ata_port_alloc(host);
- if (!ap)
- return NULL;
-
- ap->port_no = 0;
- ap->lock = &host->lock;
- ap->pio_mask = port_info->pio_mask;
- ap->mwdma_mask = port_info->mwdma_mask;
- ap->udma_mask = port_info->udma_mask;
- ap->flags |= port_info->flags;
- ap->ops = port_info->port_ops;
- ap->cbl = ATA_CBL_SATA;
- ap->print_id = atomic_inc_return(&ata_print_id);
-
- return ap;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
-
-int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
-{
- return ata_tport_add(parent, ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_add);
-
-void ata_sas_tport_delete(struct ata_port *ap)
-{
- ata_tport_delete(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
-
-/**
- * ata_sas_device_configure - Default device_configure routine for libata
+ * ata_sas_sdev_configure - Default sdev_configure routine for libata
* devices
* @sdev: SCSI device to configure
* @lim: queue limits
@@ -1264,14 +1357,14 @@ EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
* Zero.
*/
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap)
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_device_configure);
+EXPORT_SYMBOL_GPL(ata_sas_sdev_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
@@ -1389,7 +1482,7 @@ EXPORT_SYMBOL_GPL(sata_async_notification);
static int ata_eh_read_log_10h(struct ata_device *dev,
int *tag, struct ata_taskfile *tf)
{
- u8 *buf = dev->link->ap->sector_buf;
+ u8 *buf = dev->sector_buf;
unsigned int err_mask;
u8 csum;
int i;
@@ -1428,8 +1521,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
}
/**
- * ata_eh_read_sense_success_ncq_log - Read the sense data for successful
- * NCQ commands log
+ * ata_eh_get_ncq_success_sense - Read and process the sense data for
+ * successful NCQ commands log page
* @link: ATA link to get sense data for
*
* Read the sense data for successful NCQ commands log page to obtain
@@ -1442,15 +1535,18 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
+int ata_eh_get_ncq_success_sense(struct ata_link *link)
{
struct ata_device *dev = link->device;
struct ata_port *ap = dev->link->ap;
- u8 *buf = ap->ncq_sense_buf;
+ u8 *buf = dev->cdl->ncq_sense_log_buf;
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
- u64 sense_valid, val;
+ u16 extended_sense;
+ bool aux_icc_valid;
+ u32 sense_valid;
+ u64 val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1468,8 +1564,9 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
return -EIO;
}
- sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
- ((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ sense_valid = get_unaligned_le32(&buf[8]);
+ extended_sense = get_unaligned_le16(&buf[14]);
+ aux_icc_valid = extended_sense & BIT(15);
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
@@ -1482,7 +1579,7 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
*/
- if (!(sense_valid & (1ULL << tag))) {
+ if (!(sense_valid & BIT(tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
@@ -1497,6 +1594,17 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
continue;
}
+ qc->result_tf.nsect = sense[6];
+ qc->result_tf.hob_nsect = sense[7];
+ qc->result_tf.lbal = sense[8];
+ qc->result_tf.lbam = sense[9];
+ qc->result_tf.lbah = sense[10];
+ qc->result_tf.hob_lbal = sense[11];
+ qc->result_tf.hob_lbam = sense[12];
+ qc->result_tf.hob_lbah = sense[13];
+ if (aux_icc_valid)
+ qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
+
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,
@@ -1504,17 +1612,14 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
/*
- * If we have sense data, call scsi_check_sense() in order to
- * set the correct SCSI ML byte (if any). No point in checking
- * the return value, since the command has already completed
- * successfully.
+ * No point in checking the return value, since the command has
+ * already completed successfully.
*/
- scsi_check_sense(qc->scsicmd);
+ ata_eh_decide_disposition(qc);
}
return ret;
}
-EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log);
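The log page header layout used above: a 32-bit little-endian per-tag validity bitmap at offset 8, and an extended-sense word at offset 14 whose top bit says whether the AUX field of each sense descriptor is valid. A user-space sketch of the header decode (buffer contents are made up):

/* User-space sketch of the NCQ sense log header decode. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

int main(void)
{
	uint8_t buf[512] = { 0 };
	uint32_t sense_valid;
	int tag;

	buf[8] = 0x05;		/* made up: tags 0 and 2 have sense data */
	buf[15] = 0x80;		/* bit 15 of word at offset 14: AUX valid */

	sense_valid = get_le32(&buf[8]);
	printf("aux_icc_valid=%d\n", !!(get_le16(&buf[14]) & 0x8000));

	for (tag = 0; tag < 32; tag++)
		if (sense_valid & (1u << tag))
			printf("tag %d: parse its sense descriptor\n", tag);
	return 0;
}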
/**
* ata_eh_analyze_ncq_error - analyze NCQ error
@@ -1563,7 +1668,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
}
- if (!(link->sactive & (1 << tag))) {
+ if (!(link->sactive & BIT(tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
@@ -1588,8 +1693,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
@@ -1625,3 +1728,11 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
+
+const struct ata_port_operations sata_port_ops = {
+ .inherits = &ata_base_port_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .reset.hardreset = sata_std_hardreset,
+};
+EXPORT_SYMBOL_GPL(sata_port_ops);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cdf29b178ddc..721d3f270c8e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -30,7 +30,7 @@
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ioprio.h>
#include <linux/of.h>
@@ -216,20 +216,105 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
+static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
u64 information;
- information = ata_tf_read_block(tf, dev);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(qc->dev,
+ "missing result TF: can't set INFORMATION sense field\n");
+ return;
+ }
+
+ information = ata_tf_read_block(&qc->result_tf, qc->dev);
if (information == U64_MAX)
return;
- scsi_set_sense_information(cmd->sense_buffer,
+ scsi_set_sense_information(qc->scsicmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
+/**
+ * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
+ * @qc: ATA PASS-THROUGH command.
+ *
+ * Populates "ATA Status Return sense data descriptor" / "Fixed format
+ * sense data" with ATA taskfile fields.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
+{
+ struct ata_device *dev = qc->dev;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+ unsigned char *sb = cmd->sense_buffer;
+
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't set ATA PT sense fields\n");
+ return;
+ }
+
+ if ((sb[0] & 0x7f) >= 0x72) {
+ unsigned char *desc;
+ u8 len;
+
+ /* descriptor format */
+ len = sb[7];
+ desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+ if (!desc) {
+ if (SCSI_SENSE_BUFFERSIZE < len + 14)
+ return;
+ sb[7] = len + 14;
+ desc = sb + 8 + len;
+ }
+ desc[0] = 9;
+ desc[1] = 12;
+ /*
+ * Copy registers into sense buffer.
+ */
+ desc[2] = 0x00;
+ desc[3] = tf->error;
+ desc[5] = tf->nsect;
+ desc[7] = tf->lbal;
+ desc[9] = tf->lbam;
+ desc[11] = tf->lbah;
+ desc[12] = tf->device;
+ desc[13] = tf->status;
+
+ /*
+ * Fill in Extend bit, and the high order bytes
+ * if applicable.
+ */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[2] |= 0x01;
+ desc[4] = tf->hob_nsect;
+ desc[6] = tf->hob_lbal;
+ desc[8] = tf->hob_lbam;
+ desc[10] = tf->hob_lbah;
+ }
+ } else {
+ /* Fixed sense format */
+ sb[0] |= 0x80;
+ sb[3] = tf->error;
+ sb[4] = tf->status;
+ sb[5] = tf->device;
+ sb[6] = tf->nsect;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ sb[8] |= 0x80;
+ if (tf->hob_nsect)
+ sb[8] |= 0x40;
+ if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+ sb[8] |= 0x20;
+ }
+ sb[9] = tf->lbal;
+ sb[10] = tf->lbam;
+ sb[11] = tf->lbah;
+ }
+}
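For reference, the 14-byte descriptor built above is the SAT "ATA Status Return" sense data descriptor (descriptor code 09h). The byte map, as filled in by the assignments:

	/* desc[0]  = 0x09  descriptor code
	 * desc[1]  = 0x0c  additional length (12 bytes follow)
	 * desc[2]  bit 0   EXTEND (set for LBA48 commands)
	 * desc[3]  = error               desc[4]  = hob_nsect (LBA48 only)
	 * desc[5]  = nsect               desc[6]  = hob_lbal  (LBA48 only)
	 * desc[7]  = lbal                desc[8]  = hob_lbam  (LBA48 only)
	 * desc[9]  = lbam                desc[10] = hob_lbah  (LBA48 only)
	 * desc[11] = lbah                desc[12] = device
	 * desc[13] = status
	 */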
+
static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
@@ -266,7 +351,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
- * @bdev: block device associated with @sdev
+ * @unused: gendisk associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
@@ -281,7 +366,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
* RETURNS:
* Zero.
*/
-int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+int ata_std_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
geom[0] = 255;
@@ -711,7 +796,6 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
/**
* ata_to_sense_error - convert ATA error to SCSI error
- * @id: ATA device number
* @drv_stat: value contained in ATA status register
* @drv_err: value contained in ATA error register
* @sk: the sense key we'll fill out
@@ -725,8 +809,8 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
- u8 *asc, u8 *ascq)
+static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
+ u8 *ascq)
{
int i;
@@ -775,18 +859,14 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
- /* Must be first because BUSY means no other bits valid */
- {0x80, ABORTED_COMMAND, 0x47, 0x00},
- // Busy, fake parity for now
- {0x40, ILLEGAL_REQUEST, 0x21, 0x04},
- // Device ready, unaligned write command
- {0x20, HARDWARE_ERROR, 0x44, 0x00},
- // Device fault, internal target failure
- {0x08, ABORTED_COMMAND, 0x47, 0x00},
- // Timed out in xfer, fake parity for now
- {0x04, RECOVERED_ERROR, 0x11, 0x00},
- // Recovered ECC error Medium error, recovered
- {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ /* Busy: must be first because BUSY means no other bits valid */
+ { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 },
+ /* Device fault: INTERNAL TARGET FAILURE */
+ { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 },
+ /* Corrected data error */
+ { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 },
+
+ { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
};
/*
@@ -837,10 +917,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* ata_gen_passthru_sense - Generate check condition sense block.
* @qc: Command that completed.
*
- * This function is specific to the ATA descriptor format sense
- * block specified for the ATA pass through commands. Regardless
- * of whether the command errored or not, return a sense
- * block. Copy all controller registers into the sense
+ * This function is specific to the ATA pass through commands.
+ * Regardless of whether the command errored or not, return a sense
* block. If there was no error, we get the request from an ATA
* passthrough command, so we use the following sense data:
* sk = RECOVERED ERROR
@@ -852,13 +930,18 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*/
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
+ struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- unsigned char *desc = sb + 8;
u8 sense_key, asc, ascq;
- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't generate ATA PT sense data\n");
+ if (qc->err_mask)
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+ return;
+ }
/*
* Use ata_to_sense_error() to map status register bits
@@ -866,80 +949,31 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
*/
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
+ ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
/*
* ATA PASS-THROUGH INFORMATION AVAILABLE
- * Always in descriptor format sense.
+ *
+ * Note: we are supposed to call ata_scsi_set_sense(), which
+ * respects the D_SENSE bit, instead of unconditionally
+ * generating the sense data in descriptor format. However,
+ * because hdparm, hddtemp, and udisks incorrectly assume sense
+ * data in descriptor format, without even looking at the
+ * RESPONSE CODE field in the returned sense data (to see which
+ * format the returned sense data is in), we are stuck with
+ * being bug compatible with older kernels.
*/
scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
-
- if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
- u8 len;
-
- /* descriptor format */
- len = sb[7];
- desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
- if (!desc) {
- if (SCSI_SENSE_BUFFERSIZE < len + 14)
- return;
- sb[7] = len + 14;
- desc = sb + 8 + len;
- }
- desc[0] = 9;
- desc[1] = 12;
- /*
- * Copy registers into sense buffer.
- */
- desc[2] = 0x00;
- desc[3] = tf->error;
- desc[5] = tf->nsect;
- desc[7] = tf->lbal;
- desc[9] = tf->lbam;
- desc[11] = tf->lbah;
- desc[12] = tf->device;
- desc[13] = tf->status;
-
- /*
- * Fill in Extend bit, and the high order bytes
- * if applicable.
- */
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[2] |= 0x01;
- desc[4] = tf->hob_nsect;
- desc[6] = tf->hob_lbal;
- desc[8] = tf->hob_lbam;
- desc[10] = tf->hob_lbah;
- }
- } else {
- /* Fixed sense format */
- desc[0] = tf->error;
- desc[1] = tf->status;
- desc[2] = tf->device;
- desc[3] = tf->nsect;
- desc[7] = 0;
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[8] |= 0x80;
- if (tf->hob_nsect)
- desc[8] |= 0x40;
- if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
- desc[8] |= 0x20;
- }
- desc[9] = tf->lbal;
- desc[10] = tf->lbam;
- desc[11] = tf->lbah;
- }
}
/**
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
- * Generate sense block for a failed ATA command @qc. Descriptor
- * format is used to accommodate LBA48 block address.
+ * Generate sense block for a failed ATA command @qc.
*
* LOCKING:
* None.
@@ -949,39 +983,45 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- u64 block;
u8 sense_key, asc, ascq;
- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
-
if (ata_dev_disabled(dev)) {
/* Device disabled after error recovery */
/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
return;
}
+
+ if (ata_id_is_locked(dev->id)) {
+ /* Security locked */
+ /* LOGICAL UNIT ACCESS NOT AUTHORIZED */
+ ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
+ return;
+ }
+
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "Missing result TF: reporting aborted command\n");
+ goto aborted;
+ }
+
/* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
+ ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
- } else {
- /* Could not decode error */
- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->status, qc->err_mask);
- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
- block = ata_tf_read_block(&qc->result_tf, dev);
- if (block == U64_MAX)
- return;
-
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
+ /* Could not decode error */
+ ata_dev_warn(dev,
+ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
+ tf->error, tf->status, qc->err_mask);
+aborted:
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1024,7 +1064,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_device *dev)
{
- struct request_queue *q = sdev->request_queue;
int depth = 1;
if (!ata_id_has_unload(dev->id))
@@ -1038,7 +1077,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
sdev->sector_size = ATA_SECT_SIZE;
/* set DMA padding */
- blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
+ lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1;
/* make room for appending the drain */
lim->max_segments--;
@@ -1063,6 +1102,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
*/
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
+ sdev->manage_restart = ata_acpi_dev_manage_restart(dev);
sdev->force_runtime_start_on_system_start = 1;
}
@@ -1096,7 +1136,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
}
/**
- * ata_scsi_slave_alloc - Early setup of SCSI device
+ * ata_scsi_sdev_init - Early setup of SCSI device
* @sdev: SCSI device to examine
*
* This is called from scsi_alloc_sdev() when the scsi device
@@ -1106,7 +1146,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_alloc(struct scsi_device *sdev)
+int ata_scsi_sdev_init(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct device_link *link;
@@ -1129,10 +1169,10 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_init);
/**
- * ata_scsi_device_configure - Set SCSI device attributes
+ * ata_scsi_sdev_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
* @lim: queue limits
*
@@ -1144,8 +1184,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
@@ -1155,10 +1194,10 @@ int ata_scsi_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_configure);
/**
- * ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
@@ -1171,7 +1210,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
-void ata_scsi_slave_destroy(struct scsi_device *sdev)
+void ata_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
unsigned long flags;
@@ -1191,7 +1230,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
kfree(sdev->dma_drain_buf);
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
@@ -1297,17 +1336,8 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
*/
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len;
-
- lba |= ((u64)(cdb[1] & 0x1f)) << 16;
- lba |= ((u64)cdb[2]) << 8;
- lba |= ((u64)cdb[3]);
-
- len = cdb[4];
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
+ *plen = cdb[4];
}
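A worked example of the compacted READ(6) decode, with illustrative CDB bytes:

	/* cdb[] = { 0x08, 0xf2, 0x34, 0x56, 0x08, 0x00 }
	 * get_unaligned_be24(&cdb[1]) = 0xf23456
	 * 0xf23456 & 0x1fffff         = 0x123456   21-bit LBA; the top three
	 *                                          bits of cdb[1] are masked off
	 * cdb[4]                      = 0x08       transfer length, 8 blocks
	 */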
/**
@@ -1632,26 +1662,31 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
- int need_sense = (qc->err_mask != 0) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID);
+ bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
+ bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12;
+ bool is_ck_cond_request = cdb[2] & 0x20;
+ bool is_error = qc->err_mask != 0;
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to [CK_COND =1], a check
+ * generate because the user forced us to [CK_COND=1], a check
* condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, we use the following sense data:
+ * was no error, and CK_COND=1, we use the following sense data:
* sk = RECOVERED ERROR
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
- if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- ((cdb[2] & 0x20) || need_sense))
- ata_gen_passthru_sense(qc);
- else if (need_sense)
- ata_gen_ata_sense(qc);
- else
- /* Keep the SCSI ML and status byte, clear host byte. */
- cmd->result &= 0x0000ffff;
+ if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) {
+ if (!have_sense)
+ ata_gen_passthru_sense(qc);
+ ata_scsi_set_passthru_sense_fields(qc);
+ if (is_ck_cond_request)
+ set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
+ } else if (is_error) {
+ if (!have_sense)
+ ata_gen_ata_sense(qc);
+ ata_scsi_set_sense_information(qc);
+ }
ata_qc_done(qc);
}
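The CK_COND path above is what SAT-aware userspace tools exercise. A hedged sketch of such a caller using the Linux SG_IO ioctl (identify_with_ck_cond is an illustrative name; error handling omitted):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	static int identify_with_ck_cond(int fd, unsigned char *sense, int sense_len)
	{
		unsigned char cdb[16] = { 0x85 };	/* ATA PASS-THROUGH (16) */
		unsigned char data[512];
		struct sg_io_hdr io;

		cdb[1] = 4 << 1;	/* protocol: PIO Data-In */
		cdb[2] = 0x2e;		/* CK_COND=1, T_DIR=1, BYT_BLOK=1, T_LENGTH=2 */
		cdb[4] = 1;		/* one sector */
		cdb[14] = 0xec;		/* IDENTIFY DEVICE */

		memset(&io, 0, sizeof(io));
		io.interface_id = 'S';
		io.cmdp = cdb;
		io.cmd_len = sizeof(cdb);
		io.dxferp = data;
		io.dxfer_len = sizeof(data);
		io.dxfer_direction = SG_DXFER_FROM_DEV;
		io.sbp = sense;
		io.mx_sb_len = sense_len;
		io.timeout = 10000;	/* milliseconds */

		/* On success, sense[] holds the ATA Status Return descriptor
		 * (or fixed-format equivalent) filled by the kernel path above. */
		return ioctl(fd, SG_IO, &io);
	}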
@@ -1741,15 +1776,10 @@ defer:
return SCSI_MLQUEUE_HOST_BUSY;
}
-struct ata_scsi_args {
- struct ata_device *dev;
- u16 *id;
- struct scsi_cmnd *cmd;
-};
-
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
@@ -1762,30 +1792,32 @@ struct ata_scsi_args {
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
- unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
+static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
+ unsigned int (*actor)(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf))
{
- unsigned int rc;
- struct scsi_cmnd *cmd = args->cmd;
unsigned long flags;
+ unsigned int len;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
- rc = actor(args, ata_scsi_rbuf);
- if (rc == 0)
+ len = actor(dev, cmd, ata_scsi_rbuf);
+ if (len) {
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+ cmd->result = SAM_STAT_GOOD;
+ if (scsi_bufflen(cmd) > len)
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
+ }
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
-
- if (rc == 0)
- cmd->result = SAM_STAT_GOOD;
}
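With the new calling convention, a simulator actor returns the number of valid bytes in rbuf (so ata_scsi_rbuf_fill() can set the residual) or 0 after setting a sense code. A minimal hypothetical actor:

	static unsigned int ata_scsiop_example(struct ata_device *dev,
					       struct scsi_cmnd *cmd, u8 *rbuf)
	{
		/* rbuf arrives pre-zeroed (ATA_SCSI_RBUF_SIZE bytes) */
		rbuf[0] = 0x00;
		rbuf[3] = 0x04;		/* payload length */
		return 8;		/* eight valid response bytes */
	}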
/**
- * ata_scsiop_inq_std - Simulate INQUIRY command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * ata_scsiop_inq_std - Simulate standard INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns standard device identification data associated
@@ -1794,7 +1826,8 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 versions[] = {
0x00,
@@ -1831,44 +1864,49 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- /* set scsi removable (RMB) bit per ata bit, or if the
- * AHCI port says it's external (Hotplug-capable, eSATA).
+ /*
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id) ||
- (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+ if (ata_id_removable(dev->id))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC) {
+ if (dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
- if (args->dev->flags & ATA_DFLAG_CDL)
+ if (dev->flags & ATA_DFLAG_CDL)
hdr[2] = 0xd; /* claim SPC-6 version compatibility */
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
- ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+ ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16);
/* From SAT, use last 2 words from fw rev unless they are spaces */
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
if (strncmp(&rbuf[32], " ", 4) == 0)
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
else
memcpy(rbuf + 58, versions, sizeof(versions));
- return 0;
+ /*
+ * Include all 8 possible version descriptors, even if not all of
+ * them are populated.
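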
+ */
+ return 96;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns list of inquiry VPD pages available.
@@ -1876,7 +1914,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
int i, num_pages = 0;
static const u8 pages[] = {
@@ -1892,19 +1931,20 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
};
for (i = 0; i < sizeof(pages); i++) {
- if (pages[i] == 0xb6 &&
- !(args->dev->flags & ATA_DFLAG_ZAC))
+ if (pages[i] == 0xb6 && !ata_dev_is_zac(dev))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
}
rbuf[3] = num_pages; /* number of supported VPD pages */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
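The get_unaligned_be16(&rbuf[2]) + 4 expression recurs throughout the VPD simulators: bytes 2..3 of a VPD page hold PAGE LENGTH, which excludes the 4-byte page header. An illustrative helper capturing the convention:

	static unsigned int vpd_page_len(const u8 *rbuf)
	{
		/* PAGE LENGTH (bytes 2..3) plus the 4-byte VPD header */
		return get_unaligned_be16(&rbuf[2]) + 4;
	}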
/**
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns ATA device serial number.
@@ -1912,7 +1952,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 hdr[] = {
0,
@@ -1922,14 +1963,16 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
};
memcpy(rbuf, hdr, sizeof(hdr));
- ata_id_string(args->id, (unsigned char *) &rbuf[4],
+ ata_id_string(dev->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields two logical unit device identification designators:
@@ -1940,7 +1983,8 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
const int sat_model_serial_desc_len = 68;
int num;
@@ -1952,7 +1996,7 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
rbuf[num + 0] = 2;
rbuf[num + 3] = ATA_ID_SERNO_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
@@ -1964,31 +2008,33 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
num += 4;
memcpy(rbuf + num, "ATA ", 8);
num += 8;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
ATA_ID_PROD_LEN);
num += ATA_ID_PROD_LEN;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
- if (ata_id_has_wwn(args->id)) {
+ if (ata_id_has_wwn(dev->id)) {
/* SAT defined lu world wide name */
/* piv=0, assoc=lu, code_set=binary, designator=NAA */
rbuf[num + 0] = 1;
rbuf[num + 1] = 3;
rbuf[num + 3] = ATA_ID_WWN_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_WWN, ATA_ID_WWN_LEN);
num += ATA_ID_WWN_LEN;
}
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields SAT-specified ATA VPD page.
@@ -1996,7 +2042,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
@@ -2017,13 +2064,25 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
rbuf[56] = ATA_CMD_ID_ATA;
- memcpy(&rbuf[60], &args->id[0], 512);
- return 0;
+ memcpy(&rbuf[60], &dev->id[0], 512);
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B0h (Block Limits).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
@@ -2036,7 +2095,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* logical than physical sector size we need to figure out what the
* latter is.
*/
- min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
+ min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id);
put_unaligned_be16(min_io_sectors, &rbuf[6]);
/*
@@ -2048,24 +2107,37 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
 * that we support some form of unmap - in this case via WRITE SAME
* with the unmap bit set.
*/
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(dev->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
- if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+ if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
max_blocks = 128 << (20 - SECTOR_SHIFT);
put_unaligned_be64(max_blocks, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B1h (Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- int form_factor = ata_id_form_factor(args->id);
- int media_rotation_rate = ata_id_rotation_rate(args->id);
- u8 zoned = ata_id_zoned_cap(args->id);
+ int form_factor = ata_id_form_factor(dev->id);
+ int media_rotation_rate = ata_id_rotation_rate(dev->id);
+ u8 zoned = ata_id_zoned_cap(dev->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
@@ -2075,21 +2147,52 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
if (zoned)
rbuf[8] = (zoned << 4);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
+ * Provisioning
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B2h (Logical Block Provisioning).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
rbuf[1] = 0xb2;
rbuf[3] = 0x4;
rbuf[5] = 1 << 6; /* TPWS */
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B6h (Zoned Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
+ if (!ata_dev_is_zac(dev)) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/*
* zbc-r05 SCSI Zoned Block device characteristics VPD page
*/
@@ -2099,21 +2202,39 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
/*
* URSWRZ bit is only meaningful for host-managed ZAC drives
*/
- if (args->dev->zac_zoned_cap & 1)
+ if (dev->zac_zoned_cap & 1)
rbuf[4] |= 1;
- put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
- put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
- put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+ put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]);
+ put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
+ put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
+ * Ranges
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B9h (Concurrent Positioning Ranges).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ struct ata_cpr_log *cpr_log = dev->cpr_log;
u8 *desc = &rbuf[64];
int i;
+ if (!cpr_log) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
@@ -2125,7 +2246,58 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
+}
+
+/**
+ * ata_scsiop_inquiry - Simulate INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns data associated with an INQUIRY command output.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
+{
+ const u8 *scsicmd = cmd->cmnd;
+
+ /* is CmdDt set? */
+ if (scsicmd[1] & 2) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* Is EVPD clear? */
+ if ((scsicmd[1] & 1) == 0)
+ return ata_scsiop_inq_std(dev, cmd, rbuf);
+
+ switch (scsicmd[2]) {
+ case 0x00:
+ return ata_scsiop_inq_00(dev, cmd, rbuf);
+ case 0x80:
+ return ata_scsiop_inq_80(dev, cmd, rbuf);
+ case 0x83:
+ return ata_scsiop_inq_83(dev, cmd, rbuf);
+ case 0x89:
+ return ata_scsiop_inq_89(dev, cmd, rbuf);
+ case 0xb0:
+ return ata_scsiop_inq_b0(dev, cmd, rbuf);
+ case 0xb1:
+ return ata_scsiop_inq_b1(dev, cmd, rbuf);
+ case 0xb2:
+ return ata_scsiop_inq_b2(dev, cmd, rbuf);
+ case 0xb6:
+ return ata_scsiop_inq_b6(dev, cmd, rbuf);
+ case 0xb9:
+ return ata_scsiop_inq_b9(dev, cmd, rbuf);
+ default:
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
}
/**
@@ -2216,10 +2388,15 @@ static inline u16 ata_xlat_cdl_limit(u8 *buf)
static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
u8 spg)
{
- u8 *b, *cdl = dev->cdl, *desc;
+ u8 *b, *cdl, *desc;
u32 policy;
int i;
+ if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl)
+ return 0;
+
+ cdl = dev->cdl->desc_log_buf;
+
/*
* Fill the subpage. The first four bytes of the T2A/T2B mode pages
* are a header. The PAGE LENGTH field is the size of the page
@@ -2280,8 +2457,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
- if (dev->flags & ATA_DFLAG_CDL)
- buf[4] = 0x02; /* Support T2A and T2B pages */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -2316,7 +2493,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
case ALL_SUB_MPAGES:
n = ata_msense_control_spg0(dev, buf, changeable);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
- n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+ n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
n += ata_msense_control_ata_feature(dev, buf + n);
return n;
default:
@@ -2343,7 +2520,8 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate MODE SENSE commands. Assume this is invoked for direct
@@ -2353,10 +2531,10 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
+ u8 *scsicmd = cmd->cmnd, *p = rbuf;
static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
@@ -2421,17 +2599,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CACHE_MPAGE:
- p += ata_msense_caching(args->id, p, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
break;
case CONTROL_MPAGE:
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
- p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
default: /* invalid page code */
@@ -2449,29 +2627,33 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
- } else {
- put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
- rbuf[3] |= dpofua;
- if (ebd) {
- rbuf[7] = sizeof(sat_blk_desc);
- memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
- }
+
+ return rbuf[0] + 1;
}
- return 0;
+
+ put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
+ rbuf[3] |= dpofua;
+ if (ebd) {
+ rbuf[7] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+
+ return get_unaligned_be16(&rbuf[0]) + 2;
invalid_fld:
- ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
- return 1;
+ ata_scsi_set_invalid_field(dev, cmd, fp, bp);
+ return 0;
saving_not_supp:
- ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
- return 1;
+ return 0;
}
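The two return expressions differ because the MODE SENSE(6) and MODE SENSE(10) headers encode MODE DATA LENGTH differently (per SPC):

	/* MODE SENSE(6):  rbuf[0]    = MODE DATA LENGTH, excludes itself;
	 *                 total = rbuf[0] + 1
	 * MODE SENSE(10): rbuf[0..1] = MODE DATA LENGTH, excludes both
	 *                 length bytes; total = get_unaligned_be16(&rbuf[0]) + 2
	 */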
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate READ CAPACITY commands.
@@ -2479,9 +2661,10 @@ saving_not_supp:
* LOCKING:
* None.
*/
-static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
+ u8 *scsicmd = cmd->cmnd;
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
u32 sector_size; /* physical sector size in bytes */
u8 log2_per_phys;
@@ -2491,7 +2674,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
- if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (scsicmd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
@@ -2506,48 +2689,59 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[5] = sector_size >> (8 * 2);
rbuf[6] = sector_size >> (8 * 1);
rbuf[7] = sector_size;
- } else {
- /* sector count, 64-bit */
- rbuf[0] = last_lba >> (8 * 7);
- rbuf[1] = last_lba >> (8 * 6);
- rbuf[2] = last_lba >> (8 * 5);
- rbuf[3] = last_lba >> (8 * 4);
- rbuf[4] = last_lba >> (8 * 3);
- rbuf[5] = last_lba >> (8 * 2);
- rbuf[6] = last_lba >> (8 * 1);
- rbuf[7] = last_lba;
- /* sector size */
- rbuf[ 8] = sector_size >> (8 * 3);
- rbuf[ 9] = sector_size >> (8 * 2);
- rbuf[10] = sector_size >> (8 * 1);
- rbuf[11] = sector_size;
-
- rbuf[12] = 0;
- rbuf[13] = log2_per_phys;
- rbuf[14] = (lowest_aligned >> 8) & 0x3f;
- rbuf[15] = lowest_aligned;
-
- if (ata_id_has_trim(args->id) &&
- !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
- rbuf[14] |= 0x80; /* LBPME */
-
- if (ata_id_has_zero_after_trim(args->id) &&
- dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
- ata_dev_info(dev, "Enabling discard_zeroes_data\n");
- rbuf[14] |= 0x40; /* LBPRZ */
- }
+ return 8;
+ }
+
+ /*
+ * READ CAPACITY 16 command is defined as a service action
+ * (SERVICE_ACTION_IN_16 command).
+ */
+ if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
+ (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* sector count, 64-bit */
+ rbuf[0] = last_lba >> (8 * 7);
+ rbuf[1] = last_lba >> (8 * 6);
+ rbuf[2] = last_lba >> (8 * 5);
+ rbuf[3] = last_lba >> (8 * 4);
+ rbuf[4] = last_lba >> (8 * 3);
+ rbuf[5] = last_lba >> (8 * 2);
+ rbuf[6] = last_lba >> (8 * 1);
+ rbuf[7] = last_lba;
+
+ /* sector size */
+ rbuf[ 8] = sector_size >> (8 * 3);
+ rbuf[ 9] = sector_size >> (8 * 2);
+ rbuf[10] = sector_size >> (8 * 1);
+ rbuf[11] = sector_size;
+
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
+ rbuf[12] = (1 << 4); /* RC_BASIS */
+ rbuf[13] = log2_per_phys;
+ rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ rbuf[15] = lowest_aligned;
+
+ if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
+ rbuf[14] |= 0x80; /* LBPME */
+
+ if (ata_id_has_zero_after_trim(dev->id) &&
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
}
- if (ata_id_zoned_cap(args->id) ||
- args->dev->class == ATA_DEV_ZAC)
- rbuf[12] = (1 << 4); /* RC_BASIS */
}
- return 0;
+
+ return 16;
}
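The new opcode check reflects that READ CAPACITY(16) is defined as a service action of SERVICE ACTION IN(16). An illustrative CDB that passes the check:

	/* opcode 0x9e (SERVICE ACTION IN(16)), service action 0x10
	 * (SAI_READ_CAPACITY_16), allocation length 32 in bytes 10..13 */
	static const unsigned char read_cap16_cdb[16] = {
		0x9e, 0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0
	};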
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate REPORT LUNS command.
@@ -2555,11 +2749,12 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
- return 0;
+ return 16;
}
/*
@@ -2590,14 +2785,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
/* handle completion from EH */
if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into a
- * sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
+ if (!(qc->flags & ATA_QCFLAG_SENSE_VALID))
ata_gen_passthru_sense(qc);
- }
/* SCSI EH automatically locks door if sdev->locked is
* set. Sometimes door lock request continues to
@@ -3203,8 +3392,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- if (!unmap ||
- (dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) ||
!ata_id_has_trim(dev->id)) {
fp = 1;
bp = 3;
@@ -3274,7 +3462,8 @@ invalid_opcode:
/**
* ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
- * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields a subset to satisfy scsi_report_opcode()
@@ -3282,17 +3471,21 @@ invalid_opcode:
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *cdb = args->cmd->cmnd;
+ u8 *cdb = cmd->cmnd;
u8 supported = 0, cdlp = 0, rwcdlp = 0;
- unsigned int err = 0;
+
+ if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
- err = 2;
- goto out;
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
}
switch (cdb[3]) {
@@ -3360,11 +3553,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
default:
break;
}
-out:
+
/* One command format */
rbuf[0] = rwcdlp;
rbuf[1] = cdlp | supported;
- return err;
+
+ return 4;
}
/**
@@ -3696,12 +3890,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
- const u8 *buf, int len,
- u16 *fp)
+static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3720,16 +3913,21 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
switch (buf[0] & 0x03) {
case 0:
/* Disable CDL */
+ ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
- /* Enable CDL T2A/T2B: NCQ priority must be disabled */
+ /*
+ * Enable CDL. Since CDL is mutually exclusive with NCQ
+ * priority, allow this only if NCQ priority is disabled.
+ */
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
+ ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
@@ -4121,9 +4319,10 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
* However, this check is done without holding the ap->lock (a libata
* specific lock), so we can have received an error irq since then,
- * therefore we must check if EH is pending, while holding ap->lock.
+ * therefore we must check if EH is pending or running, while holding
+ * ap->lock.
*/
- if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+ if (ata_port_eh_scheduled(ap))
return SCSI_MLQUEUE_DEVICE_BUSY;
if (unlikely(!scmd->cmd_len))
@@ -4224,78 +4423,26 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
- struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
u8 tmp8;
- args.dev = dev;
- args.id = dev->id;
- args.cmd = cmd;
-
switch(scsicmd[0]) {
case INQUIRY:
- if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
- else switch (scsicmd[2]) {
- case 0x00:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
- break;
- case 0x80:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
- break;
- case 0x83:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
- break;
- case 0x89:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
- break;
- case 0xb0:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
- break;
- case 0xb1:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
- break;
- case 0xb2:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
- break;
- case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- case 0xb9:
- if (dev->cpr_log)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- default:
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- }
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_inquiry);
break;
case MODE_SENSE:
case MODE_SENSE_10:
- ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_mode_sense);
break;
case READ_CAPACITY:
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- break;
-
case SERVICE_ACTION_IN_16:
- if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_read_cap);
break;
case REPORT_LUNS:
- ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_report_luns);
break;
case REQUEST_SENSE:
@@ -4323,10 +4470,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
- ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_maint_in);
break;
/* all other commands */
@@ -4493,24 +4637,23 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
* ata_scsi_offline_dev - offline attached SCSI device
* @dev: ATA device to offline attached SCSI device for
*
- * This function is called from ata_eh_hotplug() and responsible
- * for taking the SCSI device attached to @dev offline. This
- * function is called with host lock which protects dev->sdev
- * against clearing.
+ * This function is called from ata_eh_detach_dev() and is responsible for
+ * taking the SCSI device attached to @dev offline. This function is
+ * called with host lock which protects dev->sdev against clearing.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if attached SCSI device exists, 0 otherwise.
+ * true if attached SCSI device exists, false otherwise.
*/
-int ata_scsi_offline_dev(struct ata_device *dev)
+bool ata_scsi_offline_dev(struct ata_device *dev)
{
if (dev->sdev) {
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -4580,16 +4723,15 @@ static void ata_scsi_handle_link_detach(struct ata_link *link)
ata_for_each_dev(dev, link, ALL) {
unsigned long flags;
- if (!(dev->flags & ATA_DFLAG_DETACHED))
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(dev->flags & ATA_DFLAG_DETACHED)) {
+ spin_unlock_irqrestore(ap->lock, flags);
continue;
+ }
- spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_DETACHED;
spin_unlock_irqrestore(ap->lock, flags);
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
-
ata_scsi_remove_dev(dev);
}
}
@@ -4760,8 +4902,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
if (do_resume) {
ret = scsi_resume_device(sdev);
- if (ret == -EWOULDBLOCK)
+ if (ret == -EWOULDBLOCK) {
+ scsi_device_put(sdev);
goto unlock_scan;
+ }
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 250f7dae05fd..785b6e371abf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -26,16 +26,15 @@ static struct workqueue_struct *ata_sff_wq;
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
.freeze = ata_sff_freeze,
.thaw = ata_sff_thaw,
- .prereset = ata_sff_prereset,
- .softreset = ata_sff_softreset,
- .hardreset = sata_sff_hardreset,
- .postreset = ata_sff_postreset,
+ .reset.prereset = ata_sff_prereset,
+ .reset.softreset = ata_sff_softreset,
+ .reset.hardreset = sata_sff_hardreset,
+ .reset.postreset = ata_sff_postreset,
.error_handler = ata_sff_error_handler,
.sff_dev_select = ata_sff_dev_select,
@@ -602,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct page *page;
- unsigned int offset;
+ unsigned int offset, count;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -615,28 +614,30 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
- trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
+ /* don't overrun current sg */
+ count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
+
+ trace_ata_sff_pio_transfer_data(qc, offset, count);
/*
* Split the transfer when it splits a page boundary. Note that the
* split still has to be dword aligned like all ATA data transfers.
*/
WARN_ON_ONCE(offset % 4);
- if (offset + qc->sect_size > PAGE_SIZE) {
+ if (offset + count > PAGE_SIZE) {
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0,
- qc->sect_size - split_len);
+ ata_pio_xfer(qc, page + 1, 0, count - split_len);
} else {
- ata_pio_xfer(qc, page, offset, qc->sect_size);
+ ata_pio_xfer(qc, page, offset, count);
}
- qc->curbytes += qc->sect_size;
- qc->cursg_ofs += qc->sect_size;
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
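A worked example of the bounded transfer above, assuming 4 KiB pages and illustrative numbers:

	/* offset = 4000, qc->sect_size = 512, current sg has >= 512 bytes left:
	 *   count     = min(512, 512)      = 512
	 *   split_len = 4096 - 4000        = 96
	 *   -> transfer 96 bytes at (page, 4000), then 512 - 96 = 416 bytes
	 *      at (page + 1, 0)
	 * If the sg entry had fewer than 512 bytes left, count would shrink
	 * to match it instead of overrunning the buffer.
	 */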
@@ -750,7 +751,7 @@ next_sg:
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -970,7 +971,7 @@ fsm_start:
* We ignore ERR here to workaround and proceed sending
* the CDB.
*/
- if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+ if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
ata_ehi_push_desc(ehi, "ST_FIRST: "
"DRQ=1 with device error, "
"dev_stat 0x%X", status);
@@ -1045,8 +1046,8 @@ fsm_start:
* IDENTIFY, it's likely a phantom
* device. Mark hint.
*/
- if (qc->dev->horkage &
- ATA_HORKAGE_DIAGNOSTIC)
+ if (qc->dev->quirks &
+ ATA_QUIRK_DIAGNOSTIC)
qc->err_mask |=
AC_ERR_NODEV_HINT;
} else {
@@ -1762,7 +1763,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
/* see if device passed diags: continue and warn later */
if (err == 0)
/* diagnostic fail : do nothing _YET_ */
- dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
else if (err == 1)
/* do nothing */ ;
else if ((dev->devno == 0) && (err == 0x81))
@@ -1781,7 +1782,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
* device signature is invalid with diagnostic
* failure.
*/
- if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+ if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
class = ATA_DEV_ATA;
else
class = ATA_DEV_NONE;
@@ -2053,8 +2054,6 @@ EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
*/
void ata_sff_error_handler(struct ata_port *ap)
{
- ata_reset_fn_t softreset = ap->ops->softreset;
- ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
@@ -2076,13 +2075,7 @@ void ata_sff_error_handler(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- /* ignore built-in hardresets if SCR access is not available */
- if ((hardreset == sata_std_hardreset ||
- hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
- ap->ops->postreset);
+ ata_std_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
@@ -3198,7 +3191,8 @@ void ata_sff_port_init(struct ata_port *ap)
int __init ata_sff_init(void)
{
- ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+ ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM | WQ_PERCPU,
+ WQ_MAX_ACTIVE);
if (!ata_sff_wq)
return -ENOMEM;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 3e49a877500e..62415fe67a11 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -80,12 +80,6 @@ struct ata_internal {
#define transport_class_to_port(dev) \
tdev_to_port((dev)->parent)
-
-/* Device objects are always created whit link objects */
-static int ata_tdev_add(struct ata_device *dev);
-static void ata_tdev_delete(struct ata_device *dev);
-
-
/*
* Hack to allow attributes of the same name in different objects.
*/
@@ -208,7 +202,7 @@ show_ata_port_##name(struct device *dev, \
{ \
struct ata_port *ap = transport_class_to_port(dev); \
\
- return scnprintf(buf, 20, format_string, cast ap->field); \
+ return sysfs_emit(buf, format_string, cast ap->field); \
}
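sysfs_emit() bounds the write to the PAGE_SIZE sysfs buffer, replacing the arbitrary 20-byte cap previously passed to scnprintf(). An illustrative show() callback (foo_show is a hypothetical name):

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		/* sysfs guarantees buf is a PAGE_SIZE buffer */
		return sysfs_emit(buf, "%d\n", 42);
	}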
#define ata_port_simple_attr(field, name, format_string, type) \
@@ -217,7 +211,8 @@ static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL)
ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int);
ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long);
-ata_port_simple_attr(local_port_no, port_no, "%u\n", unsigned int);
+/* We want the port_no sysfs attribute to start at 1 (ap->port_no starts at 0) */
+ata_port_simple_attr(port_no + 1, port_no, "%u\n", unsigned int);
static DECLARE_TRANSPORT_CLASS(ata_port_class,
"ata_port", NULL, NULL, NULL);
@@ -265,6 +260,7 @@ void ata_tport_delete(struct ata_port *ap)
transport_destroy_device(dev);
put_device(dev);
}
+EXPORT_SYMBOL_GPL(ata_tport_delete);
static const struct device_type ata_port_sas_type = {
.name = ATA_PORT_TYPE_NAME,
@@ -329,6 +325,7 @@ int ata_tport_add(struct device *parent,
put_device(dev);
return error;
}
+EXPORT_SYMBOL_GPL(ata_tport_add);
/**
* ata_port_classify - determine device type based on ATA-spec signature
@@ -362,135 +359,6 @@ unsigned int ata_port_classify(struct ata_port *ap,
EXPORT_SYMBOL_GPL(ata_port_classify);
/*
- * ATA link attributes
- */
-static int noop(int x) { return x; }
-
-#define ata_link_show_linkspeed(field, format) \
-static ssize_t \
-show_ata_link_##field(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ata_link *link = transport_class_to_link(dev); \
- \
- return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
-}
-
-#define ata_link_linkspeed_attr(field, format) \
- ata_link_show_linkspeed(field, format) \
-static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
-
-ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
-ata_link_linkspeed_attr(sata_spd_limit, fls);
-ata_link_linkspeed_attr(sata_spd, noop);
-
-
-static DECLARE_TRANSPORT_CLASS(ata_link_class,
- "ata_link", NULL, NULL, NULL);
-
-static void ata_tlink_release(struct device *dev)
-{
-}
-
-/**
- * ata_is_link -- check if a struct device represents a ATA link
- * @dev: device to check
- *
- * Returns:
- * %1 if the device represents a ATA link, %0 else
- */
-static int ata_is_link(const struct device *dev)
-{
- return dev->release == ata_tlink_release;
-}
-
-static int ata_tlink_match(struct attribute_container *cont,
- struct device *dev)
-{
- struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
- if (!ata_is_link(dev))
- return 0;
- return &i->link_attr_cont.ac == cont;
-}
-
-/**
- * ata_tlink_delete -- remove ATA LINK
- * @link: ATA LINK to remove
- *
- * Removes the specified ATA LINK. remove associated ATA device(s) as well.
- */
-void ata_tlink_delete(struct ata_link *link)
-{
- struct device *dev = &link->tdev;
- struct ata_device *ata_dev;
-
- ata_for_each_dev(ata_dev, link, ALL) {
- ata_tdev_delete(ata_dev);
- }
-
- transport_remove_device(dev);
- device_del(dev);
- transport_destroy_device(dev);
- put_device(dev);
-}
-
-/**
- * ata_tlink_add -- initialize a transport ATA link structure
- * @link: allocated ata_link structure.
- *
- * Initialize an ATA LINK structure for sysfs. It will be added in the
- * device tree below the ATA PORT it belongs to.
- *
- * Returns %0 on success
- */
-int ata_tlink_add(struct ata_link *link)
-{
- struct device *dev = &link->tdev;
- struct ata_port *ap = link->ap;
- struct ata_device *ata_dev;
- int error;
-
- device_initialize(dev);
- dev->parent = &ap->tdev;
- dev->release = ata_tlink_release;
- if (ata_is_host_link(link))
- dev_set_name(dev, "link%d", ap->print_id);
- else
- dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
-
- transport_setup_device(dev);
-
- error = device_add(dev);
- if (error) {
- goto tlink_err;
- }
-
- error = transport_add_device(dev);
- if (error)
- goto tlink_transport_err;
- transport_configure_device(dev);
-
- ata_for_each_dev(ata_dev, link, ALL) {
- error = ata_tdev_add(ata_dev);
- if (error) {
- goto tlink_dev_err;
- }
- }
- return 0;
- tlink_dev_err:
- while (--ata_dev >= link->device) {
- ata_tdev_delete(ata_dev);
- }
- transport_remove_device(dev);
- tlink_transport_err:
- device_del(dev);
- tlink_err:
- transport_destroy_device(dev);
- put_device(dev);
- return error;
-}
-
-/*
* ATA device attributes
*/
@@ -521,7 +389,7 @@ show_ata_dev_##field(struct device *dev, \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
- return scnprintf(buf, 20, format_string, cast ata_dev->field); \
+ return sysfs_emit(buf, format_string, cast ata_dev->field); \
}
#define ata_dev_simple_attr(field, format_string, type) \
@@ -614,10 +482,10 @@ show_ata_dev_trim(struct device *dev,
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
- else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+ else if (ata_dev->quirks & ATA_QUIRK_NOTRIM)
mode = "forced_unsupported";
- else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
- mode = "forced_unqueued";
+ else if (ata_dev->quirks & ATA_QUIRK_NO_NCQ_TRIM)
+ mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
mode = "queued";
else
@@ -640,9 +508,9 @@ static void ata_tdev_release(struct device *dev)
* @dev: device to check
*
* Returns:
- * %1 if the device represents a ATA device, %0 else
+ * true if the device represents an ATA device, false otherwise
*/
-static int ata_is_ata_dev(const struct device *dev)
+static bool ata_is_ata_dev(const struct device *dev)
{
return dev->release == ata_tdev_release;
}
@@ -650,21 +518,22 @@ static int ata_is_ata_dev(const struct device *dev)
static int ata_tdev_match(struct attribute_container *cont,
struct device *dev)
{
- struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+ struct ata_internal *i = to_ata_internal(ata_scsi_transport_template);
+
if (!ata_is_ata_dev(dev))
return 0;
return &i->dev_attr_cont.ac == cont;
}
/**
- * ata_tdev_free -- free a ATA LINK
- * @dev: ATA PHY to free
+ * ata_tdev_free -- free an ATA transport device
+ * @dev: struct ata_device owning the transport device to free
*
- * Frees the specified ATA PHY.
+ * Free the ATA transport device for the specified ATA device.
*
* Note:
- * This function must only be called on a PHY that has not
- * successfully been added using ata_tdev_add().
+ * This function must only be called for an ATA transport device that has not
+ * yet successfully been added using ata_tdev_add().
*/
static void ata_tdev_free(struct ata_device *dev)
{
@@ -673,10 +542,10 @@ static void ata_tdev_free(struct ata_device *dev)
}
/**
- * ata_tdev_delete -- remove ATA device
- * @ata_dev: ATA device to remove
+ * ata_tdev_delete -- remove an ATA transport device
+ * @ata_dev: struct ata_device owning the transport device to delete
*
- * Removes the specified ATA device.
+ * Removes the ATA transport device for the specified ATA device.
*/
static void ata_tdev_delete(struct ata_device *ata_dev)
{
@@ -687,15 +556,14 @@ static void ata_tdev_delete(struct ata_device *ata_dev)
ata_tdev_free(ata_dev);
}
-
/**
- * ata_tdev_add -- initialize a transport ATA device structure.
- * @ata_dev: ata_dev structure.
+ * ata_tdev_add -- initialize an ATA transport device
+ * @ata_dev: struct ata_device owning the transport device to add
*
- * Initialize an ATA device structure for sysfs. It will be added in the
- * device tree below the ATA LINK device it belongs to.
+ * Initialize an ATA transport device for sysfs. It will be added in the
+ * device tree below the ATA link device it belongs to.
*
- * Returns %0 on success
+ * Returns %0 on success and a negative error code on failure.
*/
static int ata_tdev_add(struct ata_device *ata_dev)
{
@@ -731,6 +599,136 @@ static int ata_tdev_add(struct ata_device *ata_dev)
return 0;
}
+/*
+ * ATA link attributes
+ */
+static int noop(int x)
+{
+ return x;
+}
+
+#define ata_link_show_linkspeed(field, format) \
+static ssize_t \
+show_ata_link_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct ata_link *link = transport_class_to_link(dev); \
+ \
+ return sprintf(buf, "%s\n", \
+ sata_spd_string(format(link->field))); \
+}
+
+#define ata_link_linkspeed_attr(field, format) \
+ ata_link_show_linkspeed(field, format) \
+static DEVICE_ATTR(field, 0444, show_ata_link_##field, NULL)
+
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
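
A note on the fls()/noop pairing above: hw_sata_spd_limit and sata_spd_limit hold
bitmasks of allowed link speeds, so fls() reduces the mask to the highest permitted
generation before sata_spd_string() renders it, while sata_spd already holds a
generation number and passes through noop() unchanged. A minimal illustration,
assuming the usual libata bit convention (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps,
bit 2 = 6.0 Gbps):

	/* Illustration only, not part of the patch. */
	unsigned int spd_limit = 0x3;      /* gen 1 and gen 2 allowed */
	unsigned int gen = fls(spd_limit); /* fls(0x3) == 2 -> "3.0 Gbps" */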
+
+static DECLARE_TRANSPORT_CLASS(ata_link_class,
+ "ata_link", NULL, NULL, NULL);
+
+static void ata_tlink_release(struct device *dev)
+{
+}
+
+/**
+ * ata_is_link -- check if a struct device represents an ATA link
+ * @dev: device to check
+ *
+ * Returns:
+ * true if the device represents an ATA link, false otherwise
+ */
+static bool ata_is_link(const struct device *dev)
+{
+ return dev->release == ata_tlink_release;
+}
+
+static int ata_tlink_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct ata_internal *i = to_ata_internal(ata_scsi_transport_template);
+
+ if (!ata_is_link(dev))
+ return 0;
+ return &i->link_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tlink_delete -- remove an ATA link transport device
+ * @link: struct ata_link owning the link transport device to remove
+ *
+ * Removes the link transport device of the specified ATA link. This also
+ * removes the ATA device(s) associated with the link.
+ */
+void ata_tlink_delete(struct ata_link *link)
+{
+ struct device *dev = &link->tdev;
+ struct ata_device *ata_dev;
+
+ ata_for_each_dev(ata_dev, link, ALL) {
+ ata_tdev_delete(ata_dev);
+ }
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+ put_device(dev);
+}
+
+/**
+ * ata_tlink_add -- initialize an ATA link transport device
+ * @link: struct ata_link owning the link transport device to initialize
+ *
+ * Initialize an ATA link transport device for sysfs. It will be added in the
+ * device tree below the ATA port it belongs to.
+ *
+ * Returns %0 on success and a negative error code on failure.
+ */
+int ata_tlink_add(struct ata_link *link)
+{
+ struct device *dev = &link->tdev;
+ struct ata_port *ap = link->ap;
+ struct ata_device *ata_dev;
+ int error;
+
+ device_initialize(dev);
+ dev->parent = &ap->tdev;
+ dev->release = ata_tlink_release;
+ if (ata_is_host_link(link))
+ dev_set_name(dev, "link%d", ap->print_id);
+ else
+ dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
+
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error)
+ goto tlink_err;
+
+ error = transport_add_device(dev);
+ if (error)
+ goto tlink_transport_err;
+ transport_configure_device(dev);
+
+ ata_for_each_dev(ata_dev, link, ALL) {
+ error = ata_tdev_add(ata_dev);
+ if (error)
+ goto tlink_dev_err;
+ }
+ return 0;
+ tlink_dev_err:
+ while (--ata_dev >= link->device)
+ ata_tdev_delete(ata_dev);
+ transport_remove_device(dev);
+ tlink_transport_err:
+ device_del(dev);
+ tlink_err:
+ transport_destroy_device(dev);
+ put_device(dev);
+ return error;
+}
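
The unwind in ata_tlink_add() leans on ata_for_each_dev() leaving ata_dev
pointing at the entry whose ata_tdev_add() failed, so the pre-decrement loop
removes, in reverse order, exactly the devices that were added. The same idiom
in distilled form (hypothetical add_one()/remove_one() helpers, not libata API):

	int add_all(struct item *items, int n)
	{
		int i, error;

		for (i = 0; i < n; i++) {
			error = add_one(&items[i]);
			if (error)
				goto undo;
		}
		return 0;
	undo:
		/* undo items i-1 .. 0, newest first */
		while (--i >= 0)
			remove_one(&items[i]);
		return error;
	}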
/*
* Setup / Teardown code
diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h
index 08a57fb9dc61..50cd2cbe8eea 100644
--- a/drivers/ata/libata-transport.h
+++ b/drivers/ata/libata-transport.h
@@ -8,9 +8,6 @@ extern struct scsi_transport_template *ata_scsi_transport_template;
int ata_tlink_add(struct ata_link *link);
void ata_tlink_delete(struct ata_link *link);
-int ata_tport_add(struct device *parent, struct ata_port *ap);
-void ata_tport_delete(struct ata_port *ap);
-
struct scsi_transport_template *ata_attach_transport(void);
void ata_release_transport(struct scsi_transport_template *t);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index eefda51f97d3..799531218ea2 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -112,7 +112,7 @@ static bool zpready(struct ata_device *dev)
if (!ret || sense_key != NOT_READY)
return false;
- sense_buf = dev->link->ap->sector_buf;
+ sense_buf = dev->sector_buf;
ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
if (ret)
return false;
@@ -160,8 +160,7 @@ void zpodd_on_suspend(struct ata_device *dev)
return;
}
- expires = zpodd->last_ready +
- msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+ expires = zpodd->last_ready + secs_to_jiffies(zpodd_poweroff_delay);
if (time_before(jiffies, expires))
return;
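
The secs_to_jiffies() conversion above is behavior-preserving; it only states
the unit directly instead of scaling seconds through milliseconds. A sketch of
the equivalence:

	/* Illustration only: both expressions yield the same expiry. */
	unsigned long old_way = msecs_to_jiffies(zpodd_poweroff_delay * 1000);
	unsigned long new_way = secs_to_jiffies(zpodd_poweroff_delay);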
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 38ce13b55474..0e7ecac73680 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -32,13 +32,30 @@ enum {
#define ATA_PORT_TYPE_NAME "ata_port"
-extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+
+static inline bool ata_sstatus_online(u32 sstatus)
+{
+ return (sstatus & 0xf) == 0x3;
+}
+
+static inline bool ata_dev_is_zac(struct ata_device *dev)
+{
+ /* Host managed device or host aware device */
+ return dev->class == ATA_DEV_ZAC ||
+ ata_id_zoned_cap(dev->id) == 0x01;
+}
+
+static inline bool ata_port_eh_scheduled(struct ata_port *ap)
+{
+ return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS);
+}
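
ata_sstatus_online() encodes the SATA rule that the DET field (bits 3:0 of the
SStatus register) reads 0x3 when a device is present and PHY communication is
established. A typical caller shape, sketched with the existing sata_scr_read()
accessor:

	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    ata_sstatus_online(sstatus)) {
		/* device present and PHY up */
	}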
+
#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
#else
@@ -66,7 +83,7 @@ extern bool ata_dev_power_init_tf(struct ata_device *dev,
struct ata_taskfile *tf, bool set_active);
extern void ata_dev_power_set_standby(struct ata_device *dev);
extern void ata_dev_power_set_active(struct ata_device *dev);
-extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+void ata_dev_free_resources(struct ata_device *dev);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
u8 subcmd, u8 action);
@@ -82,13 +99,27 @@ extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
extern int sata_link_init_spd(struct ata_link *link);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
-extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+/* libata-sata.c */
+#ifdef CONFIG_SATA_HOST
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+int ata_eh_get_ncq_success_sense(struct ata_link *link);
+#else
+static inline int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+ return -EOPNOTSUPP;
+}
+static inline int ata_eh_get_ncq_success_sense(struct ata_link *link)
+{
+ return -EOPNOTSUPP;
+}
+#endif
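
The CONFIG_SATA_HOST stubs follow the usual kernel pattern: call sites stay
free of preprocessor guards, and on SATA-less builds the inline bodies fold
to a constant return. Sketch of a guard-free caller:

	/* No #ifdef needed at the call site (sketch). */
	rc = sata_down_spd_limit(link, 0);
	if (rc == -EOPNOTSUPP)
		return rc;	/* built without CONFIG_SATA_HOST */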
+
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
@@ -99,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
extern void ata_acpi_bind_port(struct ata_port *ap);
extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern void ata_acpi_port_power_on(struct ata_port *ap);
+extern bool ata_acpi_dev_manage_restart(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -109,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { }
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_acpi_port_power_on(struct ata_port *ap) {}
+static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return false; }
#endif
/* libata-scsi.c */
@@ -117,16 +152,12 @@ extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
extern int ata_scsi_add_hosts(struct ata_host *host,
const struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
-extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern bool ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
-extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
@@ -153,17 +184,15 @@ extern void ata_eh_autopsy(struct ata_port *ap);
const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
-extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
-extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+ struct ata_reset_operations *reset_ops);
+extern int ata_eh_recover(struct ata_port *ap,
+ struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_disk);
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
+enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc);
extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index ab38871b5e00..23fff10af2ac 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -216,7 +216,7 @@ static struct ata_port_operations pacpi_ops = {
.mode_filter = pacpi_mode_filter,
.set_piomode = pacpi_set_piomode,
.set_dmamode = pacpi_set_dmamode,
- .prereset = pacpi_pre_reset,
+ .reset.prereset = pacpi_pre_reset,
.port_start = pacpi_port_start,
};
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index bb790edd6036..9d5cb9c34c52 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -392,11 +392,11 @@ static struct ata_port_operations ali_20_port_ops = {
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c2_port_ops = {
- .inherits = &ali_dma_base_ops,
- .check_atapi_dma = ali_check_atapi_dma,
- .cable_detect = ali_c2_cable_detect,
- .dev_config = ali_lock_sectors,
- .postreset = ali_c2_c3_postreset,
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+ .dev_config = ali_lock_sectors,
+ .reset.postreset = ali_c2_c3_postreset,
};
/*
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 5b02b89748b7..a2fecadc927d 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -394,7 +394,7 @@ static const struct scsi_host_template amd_sht = {
static const struct ata_port_operations amd_base_port_ops = {
.inherits = &ata_bmdma32_port_ops,
- .prereset = amd_pre_reset,
+ .reset.prereset = amd_pre_reset,
};
static struct ata_port_operations amd33_port_ops = {
@@ -429,7 +429,7 @@ static const struct ata_port_operations nv_base_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_ignore,
.mode_filter = nv_mode_filter,
- .prereset = nv_pre_reset,
+ .reset.prereset = nv_pre_reset,
.host_stop = nv_host_stop,
};
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index d0c6924d25b6..514d549286b5 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -964,7 +964,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove_new = arasan_cf_remove,
+ .remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 40544282f455..6160414172a3 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -301,7 +301,7 @@ static struct ata_port_operations artop6210_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
@@ -310,7 +310,7 @@ static struct ata_port_operations artop6260_ops = {
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
};
static void atp8xx_fixup(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 8c5cc803aab3..4c612f9543f6 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -264,7 +264,7 @@ static struct ata_port_operations atiixp_port_ops = {
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
- .prereset = atiixp_prereset,
+ .reset.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index aaef5924f636..308f86f9e2f0 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_reinit_one(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id atp867x_pci_tbl[] = {
+static const struct pci_device_id atp867x_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
{ },
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 38795508c2e9..3163c8d9cef5 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -8,9 +8,9 @@
* PIO mode and smarter silicon.
*
* The practical upshot of this is that we must always tune the
- * drive for the right PIO mode. We must also ignore all the blacklists
- * and the drive bus mastering DMA information. Also to confuse matters
- * further we can do DMA on PIO only drives.
+ * drive for the right PIO mode and ignore the drive bus mastering DMA
+ * information. Also to confuse matters further we can do DMA on PIO only
+ * drives.
*
* DMA on the 5510 also requires we disable_hlt() during DMA on early
* revisions.
@@ -151,12 +151,6 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!host)
return -ENOMEM;
- /* Perform set up for DMA */
- if (pci_enable_device_io(pdev)) {
- dev_err(&pdev->dev, "unable to configure BAR2.\n");
- return -ENODEV;
- }
-
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index b811efd2cc34..73e81e160c91 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2e6eccf2902f..6fe49b303fee 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -243,7 +243,7 @@ static struct ata_port_operations efar_ops = {
.cable_detect = efar_cable_detect,
.set_piomode = efar_set_piomode,
.set_dmamode = efar_set_dmamode,
- .prereset = efar_pre_reset,
+ .reset.prereset = efar_pre_reset,
};
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index c84a20892f1b..b2b9e0058333 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,8 +44,8 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
-#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
@@ -126,7 +126,7 @@ enum {
};
struct ep93xx_pata_data {
- const struct platform_device *pdev;
+ struct platform_device *pdev;
void __iomem *ide_base;
struct ata_timing t;
bool iordy;
@@ -135,9 +135,7 @@ struct ep93xx_pata_data {
unsigned long udma_out_phys;
struct dma_chan *dma_rx_channel;
- struct ep93xx_dma_data dma_rx_data;
struct dma_chan *dma_tx_channel;
- struct ep93xx_dma_data dma_tx_data;
};
static void ep93xx_pata_clear_regs(void __iomem *base)
@@ -637,20 +635,13 @@ static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
}
}
-static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
+static int ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
{
- if (ep93xx_dma_chan_is_m2p(chan))
- return false;
-
- chan->private = filter_param;
- return true;
-}
-
-static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
-{
- const struct platform_device *pdev = drv_data->pdev;
+ struct platform_device *pdev = drv_data->pdev;
+ struct device *dev = &pdev->dev;
dma_cap_mask_t mask;
struct dma_slave_config conf;
+ int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -660,22 +651,16 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
* to request only one channel, and reprogram it's direction at
* start of new transfer.
*/
- drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
- drv_data->dma_rx_data.name = "ep93xx-pata-rx";
- drv_data->dma_rx_channel = dma_request_channel(mask,
- ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
- if (!drv_data->dma_rx_channel)
- return;
-
- drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
- drv_data->dma_tx_data.name = "ep93xx-pata-tx";
- drv_data->dma_tx_channel = dma_request_channel(mask,
- ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
- if (!drv_data->dma_tx_channel) {
- dma_release_channel(drv_data->dma_rx_channel);
- return;
+ drv_data->dma_rx_channel = dma_request_chan(dev, "rx");
+ if (IS_ERR(drv_data->dma_rx_channel))
+ return dev_err_probe(dev, PTR_ERR(drv_data->dma_rx_channel),
+ "rx DMA setup failed\n");
+
+ drv_data->dma_tx_channel = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(drv_data->dma_tx_channel)) {
+ ret = dev_err_probe(dev, PTR_ERR(drv_data->dma_tx_channel),
+ "tx DMA setup failed\n");
+ goto fail_release_rx;
}
/* Configure receive channel direction and source address */
@@ -683,10 +668,10 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
- dev_err(&pdev->dev, "failed to configure rx dma channel\n");
- ep93xx_pata_release_dma(drv_data);
- return;
+ ret = dmaengine_slave_config(drv_data->dma_rx_channel, &conf);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to configure rx dma channel");
+ goto fail_release_dma;
}
/* Configure transmit channel direction and destination address */
@@ -694,10 +679,20 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
- dev_err(&pdev->dev, "failed to configure tx dma channel\n");
- ep93xx_pata_release_dma(drv_data);
+ ret = dmaengine_slave_config(drv_data->dma_tx_channel, &conf);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to configure tx dma channel");
+ goto fail_release_dma;
}
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(drv_data->dma_rx_channel);
+fail_release_dma:
+ ep93xx_pata_release_dma(drv_data);
+
+ return ret;
}
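
The conversion above is the standard modern dmaengine pattern: dma_request_chan()
resolves a named channel ("rx"/"tx") from the firmware description instead of a
filter callback, and dev_err_probe() both logs and returns the error while
staying silent for -EPROBE_DEFER, so deferred probes do not spam the log. The
core of the pattern, as a sketch:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "rx DMA setup failed\n");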
static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
@@ -884,10 +879,8 @@ static const struct scsi_host_template ep93xx_pata_sht = {
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_noop_qc_prep,
-
- .softreset = ep93xx_pata_softreset,
- .hardreset = ATA_OP_NULL,
+ .reset.softreset = ep93xx_pata_softreset,
+ .reset.hardreset = ATA_OP_NULL,
.sff_dev_select = ep93xx_pata_dev_select,
.sff_set_devctl = ep93xx_pata_set_devctl,
@@ -927,34 +920,26 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
void __iomem *ide_base;
int err;
- err = ep93xx_ide_acquire_gpio(pdev);
- if (err)
- return err;
-
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto err_rel_gpio;
- }
+ if (irq < 0)
+ return irq;
ide_base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
- if (IS_ERR(ide_base)) {
- err = PTR_ERR(ide_base);
- goto err_rel_gpio;
- }
+ if (IS_ERR(ide_base))
+ return PTR_ERR(ide_base);
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data) {
- err = -ENOMEM;
- goto err_rel_gpio;
- }
+ if (!drv_data)
+ return -ENOMEM;
drv_data->pdev = pdev;
drv_data->ide_base = ide_base;
drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
- ep93xx_pata_dma_init(drv_data);
+ err = ep93xx_pata_dma_init(drv_data);
+ if (err)
+ return err;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -1005,8 +990,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
err_rel_dma:
ep93xx_pata_release_dma(drv_data);
-err_rel_gpio:
- ep93xx_ide_release_gpio(pdev);
return err;
}
@@ -1018,15 +1001,21 @@ static void ep93xx_pata_remove(struct platform_device *pdev)
ata_host_detach(host);
ep93xx_pata_release_dma(drv_data);
ep93xx_pata_clear_regs(drv_data->ide_base);
- ep93xx_ide_release_gpio(pdev);
}
+static const struct of_device_id ep93xx_pata_of_ids[] = {
+ { .compatible = "cirrus,ep9312-pata" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_pata_of_ids);
+
static struct platform_driver ep93xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = ep93xx_pata_of_ids,
},
.probe = ep93xx_pata_probe,
- .remove_new = ep93xx_pata_remove,
+ .remove = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 18ceefd176df..334c4eea41ec 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -225,8 +225,8 @@ static void pata_falcon_remove_one(struct platform_device *pdev)
static struct platform_driver pata_falcon_driver = {
.probe = pata_falcon_init_one,
- .remove_new = pata_falcon_remove_one,
- .driver = {
+ .remove = pata_falcon_remove_one,
+ .driver = {
.name = "atari-falcon-ide",
},
};
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 4d6ef90ccc77..c3a8384c3e04 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -549,6 +549,7 @@ static const struct of_device_id pata_ftide010_of_match[] = {
{ .compatible = "faraday,ftide010", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, pata_ftide010_of_match);
static struct platform_driver pata_ftide010_driver = {
.driver = {
@@ -556,7 +557,7 @@ static struct platform_driver pata_ftide010_driver = {
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
- .remove_new = pata_ftide010_remove,
+ .remove = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index 94df60ac2307..8602c3889948 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -202,9 +202,9 @@ static void pata_gayle_remove_one(struct platform_device *pdev)
static struct platform_driver pata_gayle_driver = {
.probe = pata_gayle_init_one,
- .remove_new = pata_gayle_remove_one,
- .driver = {
- .name = "amiga-gayle-ide",
+ .remove = pata_gayle_remove_one,
+ .driver = {
+ .name = "amiga-gayle-ide",
},
};
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index bdccd1ba1524..b96e8bd2a3f8 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -170,8 +170,8 @@ static const char * const bad_ata66_3[] = {
NULL
};
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
- const char * const list[])
+static int hpt_dma_broken(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
@@ -197,11 +197,11 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ if (hpt_dma_broken(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
- if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
+ if (hpt_dma_broken(adev, "UDMA3", bad_ata66_3))
mask &= ~(0xF8 << ATA_SHIFT_UDMA);
- if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
+ if (hpt_dma_broken(adev, "UDMA4", bad_ata66_4))
mask &= ~(0xF0 << ATA_SHIFT_UDMA);
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
@@ -322,7 +322,7 @@ static const struct scsi_host_template hpt36x_sht = {
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = hpt366_prereset,
+ .reset.prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index c0329cf01135..07e3a984cbb1 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -218,8 +218,8 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
return 0xffffffffU; /* silence compiler warning */
}
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
- const char * const list[])
+static int hpt_dma_broken(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
@@ -281,9 +281,9 @@ static const char * const bad_ata100_5[] = {
static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ if (hpt_dma_broken(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
- if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
@@ -300,7 +300,7 @@ static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
@@ -543,7 +543,7 @@ static struct ata_port_operations hpt370_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
@@ -567,7 +567,7 @@ static struct ata_port_operations hpt302_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 5b1ecccf3c83..2cc57fcf2c46 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -356,7 +356,7 @@ static struct ata_port_operations hpt3xxn_port_ops = {
.cable_detect = hpt3x2n_cable_detect,
.set_piomode = hpt3x2n_set_piomode,
.set_dmamode = hpt3x2n_set_dmamode,
- .prereset = hpt3x2n_pre_reset,
+ .reset.prereset = hpt3x2n_pre_reset,
};
/*
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 9cfb064782c3..70f056e47e6b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -328,8 +328,6 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
static struct ata_port_operations pata_icside_port_ops = {
.inherits = &ata_bmdma_port_ops,
- /* no need to build any PRD tables for DMA */
- .qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer32,
.bmdma_setup = pata_icside_bmdma_setup,
.bmdma_start = pata_icside_bmdma_start,
@@ -338,7 +336,7 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
- .postreset = pata_icside_postreset,
+ .reset.postreset = pata_icside_postreset,
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d0aa8fc929b4..b37682b0578f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove_new = pata_imx_remove,
+ .remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index b7ac56103c8a..a6f2cfc1602e 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -81,7 +81,7 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. The 8213 is a clone so very similar
*/
@@ -238,7 +238,7 @@ static struct ata_port_operations it8213_ops = {
.cable_detect = it8213_cable_detect,
.set_piomode = it8213_set_piomode,
.set_dmamode = it8213_set_dmamode,
- .prereset = it8213_pre_reset,
+ .reset.prereset = it8213_pre_reset,
};
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2fe3fb6102ce..fc762dcc61bf 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -75,6 +75,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
@@ -519,9 +520,9 @@ static void it821x_dev_config(struct ata_device *adev)
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
- adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
+ adev->quirks &= ~ATA_QUIRK_DIAGNOSTIC;
/* No HPA in 'smart' mode */
- adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ adev->quirks |= ATA_QUIRK_BROKEN_HPA;
}
/**
@@ -632,9 +633,9 @@ static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
cbl = "";
if (mode)
- snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+ snprintf(mbuf, sizeof(mbuf), "%5s%d", mtype, mode - 1);
else
- strcpy(mbuf, "PIO");
+ strscpy(mbuf, "PIO");
if (buf[52] == 4)
ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
n, mbuf, types[buf[52]], id, cbl);
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 246bb4f8f1f7..80f6a91acf6f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -290,6 +290,7 @@ static const struct of_device_id ixp4xx_pata_of_match[] = {
{ .compatible = "intel,ixp4xx-compact-flash", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, ixp4xx_pata_of_match);
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
@@ -297,7 +298,7 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.of_match_table = ixp4xx_pata_of_match,
},
.probe = ixp4xx_pata_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index f51fb8219762..b885f33e8980 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -113,7 +113,7 @@ static const struct scsi_host_template jmicron_sht = {
static struct ata_port_operations jmicron_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = jmicron_pre_reset,
+ .reset.prereset = jmicron_pre_reset,
};
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 817838e2f70e..9eefdc5df5df 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -208,6 +208,19 @@ static const char* macio_ata_names[] = {
/* Don't let a DMA segment go all the way to 64K */
#define MAX_DBDMA_SEG 0xff00
+#ifdef CONFIG_PAGE_SIZE_64KB
+/*
+ * The SCSI core requires the segment size to cover at least a page, so
+ * for 64K page size kernels it must be at least 64K. However the
+ * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+ * requests. To handle the split requests the tablesize must be halved.
+ */
+#define PATA_MACIO_MAX_SEGMENT_SIZE SZ_64K
+#define PATA_MACIO_SG_TABLESIZE (MAX_DCMDS / 2)
+#else
+#define PATA_MACIO_MAX_SEGMENT_SIZE MAX_DBDMA_SEG
+#define PATA_MACIO_SG_TABLESIZE MAX_DCMDS
+#endif
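
Worked through, the sizing argument for 64K page kernels is: one SZ_64K
(0x10000 byte) segment exceeds MAX_DBDMA_SEG (0xff00 bytes), so
pata_macio_qc_prep() splits it into two DBDMA commands; with at most MAX_DCMDS
command slots in the table, only MAX_DCMDS / 2 such worst-case segments can be
accepted, hence the halved sg_tablesize.

	/* Worked numbers (illustration):
	 * DIV_ROUND_UP(0x10000, 0xff00) == 2 commands per 64K segment
	 * MAX_DCMDS slots / 2 commands  == MAX_DCMDS / 2 segments max
	 */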
/*
* Wait 1s for disk to answer on IDE bus after a hard reset
@@ -541,7 +554,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
while (sg_len) {
/* table overflow should never happen */
- BUG_ON (pi++ >= MAX_DCMDS);
+ if (WARN_ON_ONCE(pi >= MAX_DCMDS))
+ return AC_ERR_SYSTEM;
len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
@@ -553,11 +567,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
addr += len;
sg_len -= len;
++table;
+ ++pi;
}
}
/* Should never happen according to Tejun */
- BUG_ON(!pi);
+ if (WARN_ON_ONCE(!pi))
+ return AC_ERR_SYSTEM;
/* Convert the last command to an input/output */
table--;
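
Replacing the BUG_ON() calls with WARN_ON_ONCE() downgrades a should-never-happen
table overflow from a full kernel panic to a loud one-time warning plus a failed
command: returning AC_ERR_SYSTEM hands the queued command to libata EH instead
of taking down the machine. The shape of the check:

	/* Defensive check (sketch): warn once, fail the qc, keep running. */
	if (WARN_ON_ONCE(pi >= MAX_DCMDS))
		return AC_ERR_SYSTEM;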
@@ -742,7 +758,7 @@ static void pata_macio_irq_clear(struct ata_port *ap)
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
- dev_dbg(priv->dev, "Enabling & resetting... \n");
+ dev_dbg(priv->dev, "Enabling & resetting...\n");
if (priv->mediabay)
return;
@@ -796,8 +812,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int pata_macio_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -806,7 +822,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
int rc;
/* First call original */
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (rc)
return rc;
@@ -816,7 +832,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
lim->dma_alignment = 31;
- blk_queue_update_dma_pad(sdev->request_queue, 31);
+ lim->dma_pad_mask = 31;
/* Tell the world about it */
ata_dev_info(dev, "OHare alignment limits applied\n");
@@ -831,7 +847,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
lim->dma_alignment = 15;
- blk_queue_update_dma_pad(sdev->request_queue, 15);
+ lim->dma_pad_mask = 15;
/* We enable MWI and hack cache line size directly here, this
* is specific to this chipset and not normal values, we happen
@@ -912,17 +928,14 @@ static int pata_macio_do_resume(struct pata_macio_priv *priv)
static const struct scsi_host_template pata_macio_sht = {
__ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = MAX_DCMDS,
+ .sg_tablesize = PATA_MACIO_SG_TABLESIZE,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
- /* Not sure what the real max is but we know it's less than 64K, let's
- * use 64K minus 256
- */
- .max_segment_size = MAX_DBDMA_SEG,
- .device_configure = pata_macio_device_configure,
+ .max_segment_size = PATA_MACIO_MAX_SEGMENT_SIZE,
+ .sdev_configure = pata_macio_sdev_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
@@ -1285,7 +1298,7 @@ static int pata_macio_pci_attach(struct pci_dev *pdev,
priv->dev = &pdev->dev;
/* Get MMIO regions */
- if (pci_request_regions(pdev, "pata-macio")) {
+ if (pcim_request_all_regions(pdev, "pata-macio")) {
dev_err(&pdev->dev,
"Cannot obtain PCI resources\n");
return -EBUSY;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 8119caaad605..deab67328388 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -99,7 +99,7 @@ static const struct scsi_host_template marvell_sht = {
static struct ata_port_operations marvell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = marvell_cable_detect,
- .prereset = marvell_pre_reset,
+ .reset.prereset = marvell_pre_reset,
};
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 6c317a461a1f..210a63283f62 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -620,7 +620,6 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
.bmdma_start = mpc52xx_bmdma_start,
.bmdma_stop = mpc52xx_bmdma_stop,
.bmdma_status = mpc52xx_bmdma_status,
- .qc_prep = ata_noop_qc_prep,
};
static int mpc52xx_ata_init_one(struct device *dev,
@@ -855,7 +854,7 @@ static const struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
- .remove_new = mpc52xx_ata_remove,
+ .remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 69e4baf27d72..ce310ae7c93a 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -145,7 +145,7 @@ static struct ata_port_operations mpiix_port_ops = {
.qc_issue = mpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = mpiix_set_piomode,
- .prereset = mpiix_pre_reset,
+ .reset.prereset = mpiix_pre_reset,
.sff_data_xfer = ata_sff_data_xfer32,
};
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 44cc24d21d5f..bdb55c1a3280 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -123,7 +123,7 @@ static struct ata_port_operations ns87410_port_ops = {
.qc_issue = ns87410_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = ns87410_set_piomode,
- .prereset = ns87410_pre_reset,
+ .reset.prereset = ns87410_pre_reset,
};
static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2884acfc4863..df42ebe98db7 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -183,7 +183,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
reg_tim.s.ale = 0;
/* Not used */
reg_tim.s.page = 0;
- /* Time after IORDY to coninue to assert the data */
+ /* Time after IORDY to continue to assert the data */
reg_tim.s.wait = 0;
/* Time to wait to complete the cycle. */
reg_tim.s.pause = pause;
@@ -789,7 +789,6 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
static struct ata_port_operations octeon_cf_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = octeon_cf_check_atapi_dma,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_ata_port_noaction,
@@ -936,14 +935,13 @@ static int octeon_cf_probe(struct platform_device *pdev)
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
- hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+	hrtimer_setup(&cf_port->delayed_finish, octeon_cf_delayed_finish,
+		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- octeon_cf_ops.softreset = octeon_cf_softreset16;
+ octeon_cf_ops.reset.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
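
The hrtimer_setup() change in this file folds hrtimer_init() plus the manual
callback assignment into one call; the callback keeps the standard hrtimer
signature. A sketch, with my_expiry_fn as a hypothetical callback:

	static enum hrtimer_restart my_expiry_fn(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;
	}

	hrtimer_setup(&timer, my_expiry_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);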
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 4956f0f5b93f..178b28eff170 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -89,7 +89,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index dca82d92b004..81a7f3eb5654 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -70,7 +70,7 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44.
*/
@@ -214,7 +214,7 @@ static struct ata_port_operations oldpiix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = oldpiix_set_piomode,
.set_dmamode = oldpiix_set_dmamode,
- .prereset = oldpiix_pre_reset,
+ .reset.prereset = oldpiix_pre_reset,
};
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 3d23f57eb128..3db1b95d1404 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -156,7 +156,7 @@ static struct ata_port_operations opti_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = opti_set_piomode,
- .prereset = opti_pre_reset,
+ .reset.prereset = opti_pre_reset,
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index dfc36b4ec9c6..b42dba5f4e05 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -322,7 +322,9 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int rc = ata_do_set_mode(link, r_failed);
+ int rc;
+
+ rc = ata_set_mode(link, r_failed);
if (rc == 0) {
pci_read_config_byte(pdev, 0x43, &r);
@@ -344,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
.set_mode = optidma_set_mode,
- .prereset = optidma_pre_reset,
+ .reset.prereset = optidma_pre_reset,
};
static struct ata_port_operations optiplus_port_ops = {
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index 9a2cb9ca9d1d..22bd3ff6b7ae 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -321,8 +321,8 @@ static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
static struct ata_port_operations pata_parport_port_ops = {
.inherits = &ata_sff_port_ops,
- .softreset = pata_parport_softreset,
- .hardreset = NULL,
+ .reset.softreset = pata_parport_softreset,
+ .reset.hardreset = NULL,
.sff_dev_select = pata_parport_dev_select,
.sff_set_devctl = pata_parport_set_devctl,
@@ -768,7 +768,6 @@ static struct parport_driver pata_parport_driver = {
.name = DRV_NAME,
.match_port = pata_parport_attach,
.detach = pata_parport_detach,
- .devmodel = true,
};
static __init int pata_parport_init(void)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 5b602206c522..caefcd8c4b3c 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -46,7 +46,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
struct ata_device *slave = &link->device[1];
if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
@@ -58,7 +58,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
ata_dev_disable(slave);
}
}
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
}
/**
@@ -344,6 +344,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID2("PCMCIA ATA/ATAPI Adapter", 0x888d7b73),
PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6820c5597b14..ae914dcb0c83 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -130,7 +130,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.cable_detect = pdc2027x_cable_detect,
- .prereset = pdc2027x_prereset,
+ .reset.prereset = pdc2027x_prereset,
};
static struct ata_port_operations pdc2027x_pata133_ops = {
@@ -295,7 +295,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/* Set the PIO timing registers using value table for 133MHz */
- ata_port_dbg(ap, "Set pio regs... \n");
+ ata_port_dbg(ap, "Set PIO regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -308,7 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to PIO mode[%u]\n", pio);
}
/**
@@ -341,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- ata_port_dbg(ap, "Set udma regs... \n");
+ ata_port_dbg(ap, "Set UDMA regs...\n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -350,14 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to UDMA mode[%u]\n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- ata_port_dbg(ap, "Set mdma regs... \n");
+ ata_port_dbg(ap, "Set MDMA regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -366,7 +366,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to MDMA mode[%u]\n", mdma_mode);
} else {
ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
@@ -387,7 +387,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
struct ata_device *dev;
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc < 0)
return rc;
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index ced906bf56be..beb53bd990be 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -97,7 +97,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
-static struct pci_device_id ata_tosh[] = {
+static const struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 232c3dad7ee8..87479bc893b2 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -223,7 +223,7 @@ static int pata_platform_probe(struct platform_device *pdev)
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 538bd3423d85..03dbaf4a13a7 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
+ if (!ap->ioaddr.cmd_addr)
+ return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
+ if (!ap->ioaddr.ctl_addr)
+ return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
+ if (!ap->ioaddr.bmdma_addr)
+ return -ENOMEM;
/*
* Adjust register offsets
@@ -306,7 +312,7 @@ static void pxa_ata_remove(struct platform_device *pdev)
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove_new = pxa_ata_remove,
+ .remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 84b001097093..40ef8072c159 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -45,7 +45,7 @@ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44. The Radisys is a relative of the PIIX
* but not the same so be careful.
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0fa253ad7c93..fd81e75c9402 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -164,7 +164,7 @@ static void rb532_pata_driver_remove(struct platform_device *pdev)
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove_new = rb532_pata_driver_remove,
+ .remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 0a9689862f71..6ff4c11e937d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -276,7 +276,7 @@ static struct ata_port_operations rdc_pata_ops = {
.cable_detect = rdc_pata_cable_detect,
.set_piomode = rdc_set_piomode,
.set_dmamode = rdc_set_dmamode,
- .prereset = rdc_pata_prereset,
+ .reset.prereset = rdc_pata_prereset,
};
static const struct ata_port_info rdc_port_info = {
@@ -340,7 +340,7 @@ static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
host->private_data = hpriv;
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
@@ -359,8 +359,8 @@ static void rdc_remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id rdc_pci_tbl[] = {
- { PCI_DEVICE(0x17F3, 0x1011), },
- { PCI_DEVICE(0x17F3, 0x1012), },
+ { PCI_VDEVICE(RDC, 0x1011) },
+ { PCI_VDEVICE(RDC, 0x1012) },
{ } /* terminate list */
};
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 549ff24a9823..4edddf6bcc15 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -46,10 +46,11 @@
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
- * can overrun their FIFOs when used with the CSB5 */
-
-static const char *csb_bad_ata100[] = {
+/*
+ * Seagate Barracuda ATA IV Family drives in UDMA mode 5
+ * can overrun their FIFOs when used with the CSB5.
+ */
+static const char * const csb_bad_ata100[] = {
"ST320011A",
"ST340016A",
"ST360021A",
@@ -163,10 +164,11 @@ static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned in
* @adev: ATA device
* @mask: Mask of proposed modes
*
- * Check the blacklist and disable UDMA5 if matched
+ * Check the list of devices with broken UDMA5 and
+ * disable UDMA5 if matched.
*/
-
-static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask)
+static unsigned int serverworks_csb_filter(struct ata_device *adev,
+ unsigned int mask)
{
const char *p;
char model_num[ATA_ID_PROD_LEN + 1];
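
For context, serverworks_csb_filter() matches the drive's model string against csb_bad_ata100[] and, on a hit, masks UDMA5 and above out of the proposed mode mask. A hedged sketch of that filtering shape; "example_filter" is hypothetical, the mask arithmetic is illustrative, and the loop assumes the table is NULL-terminated:

static unsigned int example_filter(struct ata_device *adev, unsigned int mask)
{
        char model_num[ATA_ID_PROD_LEN + 1];
        int i;

        ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));

        for (i = 0; csb_bad_ata100[i]; i++) {
                if (!strcmp(csb_bad_ata100[i], model_num)) {
                        /* strip UDMA5 and above from the proposed modes */
                        mask &= ~(0xE0 << ATA_SHIFT_UDMA);
                        break;
                }
        }

        return mask;
}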
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 31de06b66221..2b751e393771 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -552,7 +552,7 @@ static struct ata_port_operations sis_133_for_sata_ops = {
static struct ata_port_operations sis_base_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = sis_pre_reset,
+ .reset.prereset = sis_pre_reset,
};
static struct ata_port_operations sis_133_ops = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 93882e976ede..2d24c6b3e9d9 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -248,7 +248,7 @@ static struct ata_port_operations sl82c105_port_ops = {
.bmdma_stop = sl82c105_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = sl82c105_set_piomode,
- .prereset = sl82c105_pre_reset,
+ .reset.prereset = sl82c105_pre_reset,
.sff_irq_check = sl82c105_sff_irq_check,
};
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 26d448a869e2..596e86a031b3 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -170,7 +170,7 @@ static struct ata_port_operations triflex_port_ops = {
.bmdma_stop = triflex_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = triflex_set_piomode,
- .prereset = triflex_prereset,
+ .reset.prereset = triflex_prereset,
};
static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 696b99720dcb..a8c9cf685b4b 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
+
/* Check with ACPI so we can spot BIOS reported SATA bridges */
- if (ata_acpi_init_gtm(ap) &&
- ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
+ return ata_acpi_cbl_pata_type(ap);
}
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
@@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
}
if (dev->class == ATA_DEV_ATAPI &&
- dmi_check_system(no_atapi_dma_dmi_table)) {
+ (dmi_check_system(no_atapi_dma_dmi_table) ||
+ config->id == PCI_DEVICE_ID_VIA_6415)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
@@ -452,7 +451,7 @@ static struct ata_port_operations via_port_ops = {
.cable_detect = via_cable_detect,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
- .prereset = via_pre_reset,
+ .reset.prereset = via_pre_reset,
.sff_tf_load = via_tf_load,
.port_start = via_port_start,
.mode_filter = via_mode_filter,
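
The via_cable_detect() change collapses the open-coded ACPI probe into ata_acpi_cbl_pata_type(). Judging from the lines it replaces, the helper is expected to report ATA_CBL_PATA80 when the BIOS-reported _GTM data indicates an 80-wire cable, and ATA_CBL_PATA40 otherwise. For reference, the removed logic was equivalent to this standalone helper (note it called ata_acpi_init_gtm() twice, as in the original):

static int example_acpi_cable_fallback(struct ata_port *ap)
{
        /* check with ACPI so BIOS-reported SATA bridges are spotted */
        if (ata_acpi_init_gtm(ap) &&
            ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
                return ATA_CBL_PATA80;
        return ATA_CBL_PATA40;
}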
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8e6b2599f0d5..17a5a59861c3 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -140,7 +140,7 @@ static struct ata_port_operations adma_ata_ops = {
.freeze = adma_freeze,
.thaw = adma_thaw,
- .prereset = adma_prereset,
+ .reset.prereset = adma_prereset,
.port_start = adma_port_start,
.port_stop = adma_port_stop,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 52f5168e4db5..7a4f59202156 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1097,7 +1097,7 @@ static struct ata_port_operations sata_dwc_ops = {
.inherits = &ata_sff_port_ops,
.error_handler = sata_dwc_error_handler,
- .hardreset = sata_dwc_hardreset,
+ .reset.hardreset = sata_dwc_hardreset,
.qc_issue = sata_dwc_qc_issue,
@@ -1240,7 +1240,7 @@ static struct platform_driver sata_dwc_driver = {
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
- .remove_new = sata_dwc_remove,
+ .remove = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 01aa05f4c3f5..84da8d6ef28e 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1395,9 +1395,9 @@ static struct ata_port_operations sata_fsl_ops = {
.freeze = sata_fsl_freeze,
.thaw = sata_fsl_thaw,
- .softreset = sata_fsl_softreset,
- .hardreset = sata_fsl_hardreset,
- .pmp_softreset = sata_fsl_softreset,
+ .reset.softreset = sata_fsl_softreset,
+ .reset.hardreset = sata_fsl_hardreset,
+ .pmp_reset.softreset = sata_fsl_softreset,
.error_handler = sata_fsl_error_handler,
.post_internal_cmd = sata_fsl_post_internal_cmd,
@@ -1589,7 +1589,7 @@ static struct platform_driver fsl_sata_driver = {
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
- .remove_new = sata_fsl_remove,
+ .remove = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 4c270999ba3c..530ee26b3012 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/reset.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,8 +26,6 @@
* @muxmode: the current muxing mode
* @ide_pins: if the device is using the plain IDE interface pins
* @sata_bridge: if the device enables the SATA bridge
- * @sata0_reset: SATA0 reset handler
- * @sata1_reset: SATA1 reset handler
* @sata0_pclk: SATA0 PCLK handler
* @sata1_pclk: SATA1 PCLK handler
*/
@@ -38,8 +35,6 @@ struct sata_gemini {
enum gemini_muxmode muxmode;
bool ide_pins;
bool sata_bridge;
- struct reset_control *sata0_reset;
- struct reset_control *sata1_reset;
struct clk *sata0_pclk;
struct clk *sata1_pclk;
};
@@ -224,18 +219,6 @@ void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
}
EXPORT_SYMBOL(gemini_sata_stop_bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg,
- unsigned int bridge)
-{
- if (bridge == 0)
- reset_control_reset(sg->sata0_reset);
- else
- reset_control_reset(sg->sata1_reset);
- msleep(10);
- return gemini_sata_setup_bridge(sg, bridge);
-}
-EXPORT_SYMBOL(gemini_sata_reset_bridge);
-
static int gemini_sata_bridge_init(struct sata_gemini *sg)
{
struct device *dev = sg->dev;
@@ -265,21 +248,6 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
- if (IS_ERR(sg->sata0_reset)) {
- dev_err(dev, "no SATA0 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata0_reset);
- }
- sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
- if (IS_ERR(sg->sata1_reset)) {
- dev_err(dev, "no SATA1 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata1_reset);
- }
-
sata_id = readl(sg->base + GEMINI_SATA_ID);
sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
sg->sata_bridge = true;
@@ -417,6 +385,7 @@ static const struct of_device_id gemini_sata_of_match[] = {
{ .compatible = "cortina,gemini-sata-bridge", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, gemini_sata_of_match);
static struct platform_driver gemini_sata_driver = {
.driver = {
@@ -424,7 +393,7 @@ static struct platform_driver gemini_sata_driver = {
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
- .remove_new = gemini_sata_remove,
+ .remove = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
index 6f6e691d6007..b6e4a5c86e01 100644
--- a/drivers/ata/sata_gemini.h
+++ b/drivers/ata/sata_gemini.h
@@ -17,6 +17,5 @@ bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
#endif
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 63ef7bb073ce..3421039f4bae 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
+ of_node_put(phy_data.np);
return 0;
}
phy_count += 1;
@@ -427,7 +428,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
static struct ata_port_operations ahci_highbank_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_highbank_hardreset,
+ .reset.hardreset = ahci_highbank_hardreset,
.transmit_led_message = ecx_transmit_led_message,
};
@@ -614,12 +615,12 @@ static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove_new = ata_platform_remove_one,
- .driver = {
- .name = "highbank-ahci",
- .of_match_table = ahci_of_match,
- .pm = &ahci_highbank_pm_ops,
- },
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "highbank-ahci",
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_highbank_pm_ops,
+ },
.probe = ahci_highbank_probe,
};
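
The highbank fix above plugs an OF node refcount leak: phandle lookups take a reference on the returned node, so an early exit must drop it with of_node_put(). A minimal sketch of the pattern, with hypothetical names and a simplified loop:

#include <linux/of.h>
#include <linux/of_address.h>

static int example_lookup(struct device_node *parent)
{
        struct of_phandle_args args;
        int i;

        for (i = 0; !of_parse_phandle_with_args(parent, "phys", "#phy-cells",
                                                i, &args); i++) {
                void __iomem *base = of_iomap(args.np, 0);

                if (!base) {
                        /* drop the reference taken by the lookup */
                        of_node_put(args.np);
                        return -ENOMEM;
                }
                /* keep args.np referenced while the mapping is in use */
        }
        return 0;
}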
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index db9c255dc9f2..46a8c20daf18 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -730,7 +730,7 @@ static struct ata_port_operations inic_port_ops = {
.freeze = inic_freeze,
.thaw = inic_thaw,
- .hardreset = inic_hardreset,
+ .reset.hardreset = inic_hardreset,
.error_handler = inic_error_handler,
.post_internal_cmd = inic_post_internal_cmd,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 05c905827dc5..ffb396f61731 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -672,8 +672,8 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true,
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations mv5_ops = {
@@ -687,7 +687,7 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
+ .reset.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -709,10 +709,10 @@ static struct ata_port_operations mv6_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
- .softreset = mv_softreset,
- .pmp_hardreset = mv_pmp_hardreset,
- .pmp_softreset = mv_softreset,
+ .reset.hardreset = mv_hardreset,
+ .reset.softreset = mv_softreset,
+ .pmp_reset.hardreset = mv_pmp_hardreset,
+ .pmp_reset.softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
@@ -4255,7 +4255,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove_new = mv_platform_remove,
+ .remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 36d99043ef50..841e7de2bba6 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,8 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -319,8 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -382,10 +382,10 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .device_configure = nv_adma_device_configure,
+ .sdev_configure = nv_adma_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
@@ -393,10 +393,10 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .device_configure = nv_swncq_device_configure,
+ .sdev_configure = nv_swncq_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
@@ -462,7 +462,7 @@ static struct ata_port_operations nv_generic_ops = {
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
- .hardreset = nv_hardreset,
+ .reset.hardreset = nv_hardreset,
};
static struct ata_port_operations nv_nf2_ops = {
@@ -663,8 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -676,7 +676,7 @@ static int nv_adma_device_configure(struct scsi_device *sdev,
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -1871,8 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1882,7 +1882,7 @@ static int nv_swncq_device_configure(struct scsi_device *sdev,
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
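
The ->device_configure to ->sdev_configure rename is mechanical, and both nv variants keep the same chaining convention: call the libata library helper first, bail out for non-libata targets, then layer controller-specific limits on top. A sketch of that shape; the function name and trailing comment are illustrative:

static int example_sdev_configure(struct scsi_device *sdev,
                                  struct queue_limits *lim)
{
        int rc;

        /* let the libata core apply its generic configuration first */
        rc = ata_scsi_sdev_configure(sdev, lim);

        /* not a proper libata device, nothing more to do */
        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                return rc;

        /* controller-specific adjustments to *lim would follow here */
        return rc;
}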
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 2df1a070b25a..2a005aede123 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -188,7 +188,7 @@ static struct ata_port_operations pdc_sata_ops = {
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
.port_start = pdc_sata_port_start,
- .hardreset = pdc_sata_hardreset,
+ .reset.hardreset = pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
@@ -206,7 +206,7 @@ static struct ata_port_operations pdc_pata_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.port_start = pdc_common_port_start,
- .softreset = pdc_pata_softreset,
+ .reset.softreset = pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 8a6286159044..cfb9b5b61cd7 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -123,8 +123,8 @@ static struct ata_port_operations qs_ata_ops = {
.freeze = qs_freeze,
.thaw = qs_thaw,
- .prereset = qs_prereset,
- .softreset = ATA_OP_NULL,
+ .reset.prereset = qs_prereset,
+ .reset.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c1469d076880..487eadd4073f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -624,7 +624,7 @@ static struct ata_port_operations sata_rcar_port_ops = {
.freeze = sata_rcar_freeze,
.thaw = sata_rcar_thaw,
- .softreset = sata_rcar_softreset,
+ .reset.softreset = sata_rcar_softreset,
.scr_read = sata_rcar_scr_read,
.scr_write = sata_rcar_scr_write,
@@ -1009,7 +1009,7 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
- .remove_new = sata_rcar_remove,
+ .remove = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index cc77c0248284..1b6dc950a42a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
static const struct sil_drivelist {
const char *product;
unsigned int quirk;
-} sil_blacklist [] = {
+} sil_quirks[] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
@@ -351,7 +351,7 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
u32 tmp, dev_mode[2] = { };
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc)
return rc;
@@ -600,8 +600,8 @@ static void sil_thaw(struct ata_port *ap)
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*
- * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
- * The Maxtor quirk is in the blacklist, but I'm keeping the original
+ * 20040111 - Seagate drives affected by the Mod15Write bug are quirked
+ * The Maxtor quirk is in sil_quirks, but I'm keeping the original
* pessimistic fix for the following reasons...
* - There seems to be less info on it, only one device gleaned off the
* Windows driver, maybe only one is affected. More info would be greatly
@@ -616,13 +616,13 @@ static void sil_dev_config(struct ata_device *dev)
unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* This controller doesn't support trim */
- dev->horkage |= ATA_HORKAGE_NOTRIM;
+ dev->quirks |= ATA_QUIRK_NOTRIM;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
- for (n = 0; sil_blacklist[n].product; n++)
- if (!strcmp(sil_blacklist[n].product, model_num)) {
- quirks = sil_blacklist[n].quirk;
+ for (n = 0; sil_quirks[n].product; n++)
+ if (!strcmp(sil_quirks[n].product, model_num)) {
+ quirks = sil_quirks[n].quirk;
break;
}
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 72c03cbdaff4..d642ece9f07a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -378,10 +378,9 @@ static const struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations sil24_ops = {
@@ -394,10 +393,10 @@ static struct ata_port_operations sil24_ops = {
.freeze = sil24_freeze,
.thaw = sil24_thaw,
- .softreset = sil24_softreset,
- .hardreset = sil24_hardreset,
- .pmp_softreset = sil24_softreset,
- .pmp_hardreset = sil24_pmp_hardreset,
+ .reset.softreset = sil24_softreset,
+ .reset.hardreset = sil24_hardreset,
+ .pmp_reset.softreset = sil24_softreset,
+ .pmp_reset.hardreset = sil24_pmp_hardreset,
.error_handler = sil24_error_handler,
.post_internal_cmd = sil24_post_internal_cmd,
.dev_config = sil24_dev_config,
@@ -1317,7 +1316,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "Using MSI\n");
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
}
pci_set_master(pdev);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index ef8724986de3..b8b6d9eff3b8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -290,7 +290,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
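
The pci_intx() to pcim_intx() conversions in this series switch to the managed variant: for a device enabled with pcim_enable_device(), pcim_intx() registers a devres undo action so the original INTx state is restored automatically when the driver unbinds. A sketch of the resulting init shape, with error handling trimmed:

static int example_init_one(struct pci_dev *pdev)
{
        int rc;

        rc = pcim_enable_device(pdev);  /* managed enable */
        if (rc)
                return rc;

        pci_set_master(pdev);
        pcim_intx(pdev, 1);             /* undone automatically via devres */

        return 0;
}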
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 598a872f6a08..c5d6aa36c9c3 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -340,8 +340,8 @@ static const struct scsi_host_template k2_sata_sht = {
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = k2_sata_softreset,
- .hardreset = k2_sata_hardreset,
+ .reset.softreset = k2_sata_softreset,
+ .reset.hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index a482741eb181..0986ebd1eb4e 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -241,7 +241,7 @@ static struct ata_port_operations pdc_20621_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
- .softreset = pdc_softreset,
+ .reset.softreset = pdc_softreset,
.error_handler = pdc_error_handler,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- pdc_i2c_read_data[i].reg,
- &spd0[pdc_i2c_read_data[i].ofs]);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc_i2c_read_data[i].reg,
+ &spd0[pdc_i2c_read_data[i].ofs])) {
+ dev_err(host->dev,
+ "Failed in i2c read at index %d: device=%#x, reg=%#x\n",
+ i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
+ return -EIO;
+ }
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
+ if (size < 0)
+ return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
@@ -1294,32 +1301,32 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
}
if (dimm_test) {
- u8 test_parttern1[40] =
+ u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
- u8 test_parttern2[40] = {0};
+ u8 test_pattern2[40] = {0};
- pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
- pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
+ pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
+ pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
- pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
}
/* ECC initialization. */
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 60ea45926cd1..44985796cc47 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -67,7 +67,7 @@ static struct ata_port_operations uli_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = uli_scr_read,
.scr_write = uli_scr_write,
- .hardreset = ATA_OP_NULL,
+ .reset.hardreset = ATA_OP_NULL,
};
static const struct ata_port_info uli_port_info = {
@@ -221,7 +221,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 57cbf2cef618..68e9003ec2d4 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -25,6 +25,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/string_choices.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "2.6"
@@ -119,7 +120,7 @@ static struct ata_port_operations svia_base_ops = {
static struct ata_port_operations vt6420_sata_ops = {
.inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
- .prereset = vt6420_prereset,
+ .reset.prereset = vt6420_prereset,
.bmdma_start = vt6420_bmdma_start,
};
@@ -139,7 +140,7 @@ static struct ata_port_operations vt6421_sata_ops = {
static struct ata_port_operations vt8251_ops = {
.inherits = &svia_base_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = vt8251_scr_read,
.scr_write = vt8251_scr_write,
};
@@ -359,7 +360,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
ata_port_info(ap,
"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
- online ? "up" : "down", sstatus, scontrol);
+ str_up_down(online), sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d39b87537168..a53a2dfc1e17 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -384,7 +384,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
if (pci_enable_msi(pdev) == 0)
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
/*
* Config offset 0x98 is "Extended Control and Status Register 0"
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d4aa0f353b6c..fa3c76a2b49d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,7 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (!skb->len) return 0;
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -345,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
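
The new ->pre_send callback lets the ATM core validate a packet before the device's ->send runs; atmtcp's checker rejects frames shorter than the header and frames whose length field collides with the ATMTCP_HDR_MAGIC control marker, which is why atmtcp_c_send() drops its zero-length early return. A sketch of how a core send path could invoke the hook; this dispatch function is an assumption for illustration, not quoted from the patch:

static int example_vcc_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
        int err;

        if (vcc->dev->ops->pre_send) {
                /* reject malformed frames before committing to ->send */
                err = vcc->dev->ops->pre_send(vcc, skb);
                if (err)
                        return err;
        }
        return vcc->dev->ops->send(vcc, skb);
}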
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index cb00f8244e41..f62e38571440 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1374,7 +1374,9 @@ fore200e_open(struct atm_vcc *vcc)
vcc->dev_data = NULL;
+ mutex_lock(&fore200e->rate_mtx);
fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
+ mutex_unlock(&fore200e->rate_mtx);
kfree(fore200e_vcc);
return -EINVAL;
@@ -2569,7 +2571,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove_new = fore200e_sba_remove,
+ .remove = fore200e_sba_remove,
};
#endif
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index fcd70e094a2e..e6a300203e6c 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -366,8 +366,8 @@ EXPORT_SYMBOL(idt77105_init);
static void __exit idt77105_exit(void)
{
/* turn off timers */
- del_timer_sync(&stats_timer);
- del_timer_sync(&restart_timer);
+ timer_delete_sync(&stats_timer);
+ timer_delete_sync(&restart_timer);
}
module_exit(idt77105_exit);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e7f713cd70d3..f2e91b7d79f0 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
+ return -ENOMEM;
error = -EINVAL;
@@ -1118,8 +1120,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
rpp->len += skb->len;
if (stat & SAR_RSQE_EPDU) {
+ unsigned int len, truesize;
unsigned char *l1l2;
- unsigned int len;
l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
@@ -1189,14 +1191,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
+ truesize = skb->truesize;
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
- if (skb->truesize > SAR_FB_SIZE_3)
+ if (truesize > SAR_FB_SIZE_3)
add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
- else if (skb->truesize > SAR_FB_SIZE_2)
+ else if (truesize > SAR_FB_SIZE_2)
add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
- else if (skb->truesize > SAR_FB_SIZE_1)
+ else if (truesize > SAR_FB_SIZE_1)
add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
else
add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
@@ -1530,7 +1533,7 @@ idt77252_tx(struct idt77252_dev *card)
static void
tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = from_timer(card, t, tst_timer);
+ struct idt77252_dev *card = timer_container_of(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
@@ -1856,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, paddr))
+ goto outpoolrm;
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1870,6 +1875,7 @@ outunmap:
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
+outpoolrm:
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -2069,7 +2075,7 @@ idt77252_rate_logindex(struct idt77252_dev *card, int pcr)
static void
idt77252_est_timer(struct timer_list *t)
{
- struct rate_estimator *est = from_timer(est, t, timer);
+ struct rate_estimator *est = timer_container_of(est, t, timer);
struct vc_map *vc = est->vc;
struct idt77252_dev *card = vc->card;
unsigned long flags;
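
Two separate fixes in the idt77252 hunks are worth calling out: dma_map_single() results must be checked with dma_mapping_error() before use, and skb fields must not be read after vcc->push() hands the buffer to the upper layer (hence truesize is snapshotted first). A condensed sketch of both patterns, with illustrative names:

#include <linux/dma-mapping.h>

static int example_rx(struct device *dev, struct atm_vcc *vcc,
                      struct sk_buff *skb)
{
        unsigned int truesize;
        dma_addr_t paddr;

        paddr = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, paddr))      /* must check before use */
                return -ENOMEM;

        /*
         * Snapshot anything needed later: once vcc->push() runs, the
         * skb belongs to the upper layer and may already be freed.
         */
        truesize = skb->truesize;
        vcc->push(vcc, skb);

        /* pick a replacement free-buffer pool from 'truesize' here */
        return 0;
}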
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d213adcefe33..301e697e22ad 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3283,7 +3283,7 @@ static void __exit ia_module_exit(void)
{
pci_unregister_driver(&ia_driver);
- del_timer_sync(&ia_timer);
+ timer_delete_sync(&ia_timer);
}
module_init(ia_module_init);
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 32d7aa141d96..0dfa2cdc897c 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -755,7 +755,7 @@ static void lanai_shutdown_rx_vci(const struct lanai_vcc *lvcc)
/* Shutdown transmitting on card.
* Unfortunately the lanai needs us to wait until all the data
* drains out of the buffer before we can dealloc it, so this
- * can take awhile -- up to 370ms for a full 128KB buffer
+ * can take a while -- up to 370ms for a full 128KB buffer
* assuming everyone else is quiet. In theory the time is
* boundless if there's a CBR VCC holding things up.
*/
@@ -1758,7 +1758,7 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = from_timer(lanai, t, timer);
+ struct lanai_dev *lanai = timer_container_of(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
@@ -1792,7 +1792,7 @@ static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
static inline void lanai_timed_poll_stop(struct lanai_dev *lanai)
{
- del_timer_sync(&lanai->timer);
+ timer_delete_sync(&lanai->timer);
}
/* -------------------- INTERRUPT SERVICE: */
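
The timer changes across these drivers are a tree-wide rename with unchanged semantics: del_timer_sync() became timer_delete_sync(), and from_timer() became timer_container_of(), both recovering the enclosing structure from the timer_list pointer. A sketch of the renamed pair in use:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_dev {
        struct timer_list timer;
};

static void example_poll(struct timer_list *t)
{
        /* formerly: from_timer(dev, t, timer) */
        struct example_dev *dev = timer_container_of(dev, t, timer);

        mod_timer(&dev->timer, jiffies + HZ);
}

static void example_stop(struct example_dev *dev)
{
        /* formerly: del_timer_sync(&dev->timer) */
        timer_delete_sync(&dev->timer);
}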
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 27153d6bc781..45952cfea06b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -300,7 +300,7 @@ static void __exit nicstar_cleanup(void)
{
XPRINTK("nicstar: nicstar_cleanup() called.\n");
- del_timer_sync(&ns_timer);
+ timer_delete_sync(&ns_timer);
pci_unregister_driver(&nicstar_driver);
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 32802ea9521c..7d0fa729c2fe 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -347,7 +347,7 @@ static int suni_stop(struct atm_dev *dev)
for (walk = &sunis; *walk != PRIV(dev);
walk = &PRIV((*walk)->dev)->next);
*walk = PRIV((*walk)->dev)->next;
- if (!sunis) del_timer_sync(&poll_timer);
+ if (!sunis) timer_delete_sync(&poll_timer);
spin_unlock_irqrestore(&sunis_lock,flags);
kfree(PRIV(dev));
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 69d2138d7efb..bedc6133f970 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -316,7 +316,7 @@ endif # PARPORT_PANEL
config PANEL_CHANGE_MESSAGE
bool "Change LCD initialization message ?"
- depends on CHARLCD
+ depends on CHARLCD || LINEDISP
help
This allows you to replace the boot message indicating the kernel version
and the driver version with a custom message. This is useful on appliances
@@ -489,7 +489,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
- depends on FB && I2C && INPUT
+ depends on FB && I2C && INPUT && BACKLIGHT_CLASS_DEVICE
select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
@@ -503,6 +503,7 @@ config HT16K33
config MAX6959
tristate "Maxim MAX6958/6959 7-segment LED controller"
depends on I2C
+ select BITREVERSE
select REGMAP_I2C
select LINEDISP
help
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index 0b1c99cca733..a7eae99a48f7 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -270,7 +270,7 @@ static int __init charlcd_probe(struct platform_device *pdev)
struct charlcd *lcd;
struct resource *res;
- lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL);
+ lcd = kzalloc(sizeof(*lcd), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 6526aa51fb1d..e1a94ae3eb0c 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -37,11 +37,6 @@ module_param(cfag12864b_rate, uint, 0444);
MODULE_PARM_DESC(cfag12864b_rate,
"Refresh rate (hertz)");
-unsigned int cfag12864b_getrate(void)
-{
- return cfag12864b_rate;
-}
-
/*
* cfag12864b Commands
*
@@ -249,11 +244,6 @@ void cfag12864b_disable(void)
mutex_unlock(&cfag12864b_mutex);
}
-unsigned char cfag12864b_isenabled(void)
-{
- return cfag12864b_updating;
-}
-
static void cfag12864b_update(struct work_struct *work)
{
unsigned char c;
@@ -293,10 +283,8 @@ static void cfag12864b_update(struct work_struct *work)
*/
EXPORT_SYMBOL_GPL(cfag12864b_buffer);
-EXPORT_SYMBOL_GPL(cfag12864b_getrate);
EXPORT_SYMBOL_GPL(cfag12864b_enable);
EXPORT_SYMBOL_GPL(cfag12864b_disable);
-EXPORT_SYMBOL_GPL(cfag12864b_isenabled);
/*
* Is the module inited?
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 2b74dabe7e17..24baf6b2c587 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -108,7 +108,7 @@ static void cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove_new = cfag12864bfb_remove,
+ .remove = cfag12864bfb_remove,
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index bb9463814454..09020bb8ad15 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -526,7 +526,6 @@ static const struct file_operations charlcd_fops = {
.write = charlcd_write,
.open = charlcd_open,
.release = charlcd_release,
- .llseek = no_llseek,
};
static struct miscdevice charlcd_dev = {
@@ -596,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
-struct charlcd *charlcd_alloc(void)
+struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
+ lcd->drvdata = priv->drvdata;
return lcd;
}
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
index eed80063a6d2..d10b89740bca 100644
--- a/drivers/auxdisplay/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -36,6 +36,8 @@ enum charlcd_lines {
CHARLCD_LINES_2,
};
+struct charlcd_ops;
+
struct charlcd {
const struct charlcd_ops *ops;
const unsigned char *char_conv; /* Optional */
@@ -49,7 +51,7 @@ struct charlcd {
unsigned long y;
} addr;
- void *drvdata;
+ void *drvdata; /* Set by charlcd_alloc() */
};
/**
@@ -93,7 +95,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
-struct charlcd *charlcd_alloc(void);
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
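
After this change, charlcd_alloc() sizes a single allocation for both the private charlcd bookkeeping and the caller's driver data, and points lcd->drvdata at the embedded region, so drivers no longer allocate and free drvdata separately. A sketch of the new call pattern, mirroring the lcd2s and hd44780 conversions below; struct example_priv is hypothetical driver data:

struct example_priv {
        struct i2c_client *i2c;
};

static int example_setup(void)
{
        struct example_priv *priv;
        struct charlcd *lcd;

        lcd = charlcd_alloc(sizeof(*priv));     /* one allocation for both */
        if (!lcd)
                return -ENOMEM;

        priv = lcd->drvdata;    /* embedded region, set by charlcd_alloc() */

        /* ... initialize priv, set lcd->ops, charlcd_register(lcd) ... */

        charlcd_free(lcd);      /* frees the embedded priv too */
        return 0;
}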
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 7ac0b1b1d548..cef42656c4b0 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -222,20 +222,17 @@ static int hd44780_probe(struct platform_device *pdev)
return -EINVAL;
}
- hdc = hd44780_common_alloc();
- if (!hdc)
- return -ENOMEM;
-
- lcd = charlcd_alloc();
+ lcd = hd44780_common_alloc();
if (!lcd)
- goto fail1;
+ return -ENOMEM;
- hd = kzalloc(sizeof(struct hd44780), GFP_KERNEL);
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
goto fail2;
+ hdc = lcd->drvdata;
hdc->hd44780 = hd;
- lcd->drvdata = hdc;
+
for (i = 0; i < ifwidth; i++) {
hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i,
GPIOD_OUT_LOW);
@@ -313,9 +310,7 @@ static int hd44780_probe(struct platform_device *pdev)
fail3:
kfree(hd);
fail2:
- kfree(lcd);
-fail1:
- kfree(hdc);
+ hd44780_common_free(lcd);
return ret;
}
@@ -326,9 +321,7 @@ static void hd44780_remove(struct platform_device *pdev)
charlcd_unregister(lcd);
kfree(hdc->hd44780);
- kfree(lcd->drvdata);
-
- kfree(lcd);
+ hd44780_common_free(lcd);
}
static const struct of_device_id hd44780_of_match[] = {
@@ -339,7 +332,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);
static struct platform_driver hd44780_driver = {
.probe = hd44780_probe,
- .remove_new = hd44780_remove,
+ .remove = hd44780_remove,
.driver = {
.name = "hd44780",
.of_match_table = hd44780_of_match,
diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
index 7cbf375b0fa5..1792fe2a4460 100644
--- a/drivers/auxdisplay/hd44780_common.c
+++ b/drivers/auxdisplay/hd44780_common.c
@@ -351,19 +351,28 @@ int hd44780_common_redefine_char(struct charlcd *lcd, char *esc)
}
EXPORT_SYMBOL_GPL(hd44780_common_redefine_char);
-struct hd44780_common *hd44780_common_alloc(void)
+struct charlcd *hd44780_common_alloc(void)
{
- struct hd44780_common *hd;
+ struct hd44780_common *hdc;
+ struct charlcd *lcd;
- hd = kzalloc(sizeof(*hd), GFP_KERNEL);
- if (!hd)
+ lcd = charlcd_alloc(sizeof(*hdc));
+ if (!lcd)
return NULL;
- hd->ifwidth = 8;
- hd->bwidth = DEFAULT_LCD_BWIDTH;
- hd->hwidth = DEFAULT_LCD_HWIDTH;
- return hd;
+ hdc = lcd->drvdata;
+ hdc->ifwidth = 8;
+ hdc->bwidth = DEFAULT_LCD_BWIDTH;
+ hdc->hwidth = DEFAULT_LCD_HWIDTH;
+ return lcd;
}
EXPORT_SYMBOL_GPL(hd44780_common_alloc);
+void hd44780_common_free(struct charlcd *lcd)
+{
+ charlcd_free(lcd);
+}
+EXPORT_SYMBOL_GPL(hd44780_common_free);
+
+MODULE_DESCRIPTION("Common functions for HD44780 (and compatibles) LCD displays");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/hd44780_common.h b/drivers/auxdisplay/hd44780_common.h
index a16aa8c29c99..4c87f55722b6 100644
--- a/drivers/auxdisplay/hd44780_common.h
+++ b/drivers/auxdisplay/hd44780_common.h
@@ -30,4 +30,6 @@ int hd44780_common_blink(struct charlcd *lcd, enum charlcd_onoff on);
int hd44780_common_fontsize(struct charlcd *lcd, enum charlcd_fontsize size);
int hd44780_common_lines(struct charlcd *lcd, enum charlcd_lines lines);
int hd44780_common_redefine_char(struct charlcd *lcd, char *esc);
-struct hd44780_common *hd44780_common_alloc(void);
+
+struct charlcd *hd44780_common_alloc(void);
+void hd44780_common_free(struct charlcd *lcd);
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 96acfb2b58cd..0b8ba754b343 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -25,7 +25,7 @@
#include <linux/map_to_7segment.h>
#include <linux/map_to_14segment.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "line-display.h"
@@ -284,27 +284,14 @@ static int ht16k33_initialize(struct ht16k33_priv *priv)
static int ht16k33_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
+ const int brightness = backlight_get_brightness(bl);
struct ht16k33_priv *priv = bl_get_data(bl);
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
return ht16k33_brightness_set(priv, brightness);
}
-static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi)
-{
- struct ht16k33_priv *priv = bl_get_data(bl);
-
- return (fi == NULL) || (fi->par == priv);
-}
-
static const struct backlight_ops ht16k33_bl_ops = {
.update_status = ht16k33_bl_update_status,
- .check_fb = ht16k33_bl_check_fb,
};
/*
@@ -496,6 +483,7 @@ static int ht16k33_led_probe(struct device *dev, struct led_classdev *led,
led->max_brightness = MAX_BRIGHTNESS;
err = devm_led_classdev_register_ext(dev, led, &init_data);
+ fwnode_handle_put(init_data.fwnode);
if (err)
dev_err(dev, "Failed to register LED\n");
@@ -669,7 +657,6 @@ static int ht16k33_seg_probe(struct device *dev, struct ht16k33_priv *priv,
static int ht16k33_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- const struct of_device_id *id;
struct ht16k33_priv *priv;
uint32_t dft_brightness;
int err;
@@ -684,9 +671,8 @@ static int ht16k33_probe(struct i2c_client *client)
return -ENOMEM;
priv->client = client;
- id = i2c_of_match_device(dev->driver->of_match_table, client);
- if (id)
- priv->type = (uintptr_t)id->data;
+ priv->type = (uintptr_t)i2c_get_match_data(client);
+
i2c_set_clientdata(client, priv);
err = ht16k33_initialize(priv);
@@ -759,7 +745,9 @@ static void ht16k33_remove(struct i2c_client *client)
}
static const struct i2c_device_id ht16k33_i2c_match[] = {
- { "ht16k33", 0 },
+ { "3108", DISP_QUAD_7SEG },
+ { "3130", DISP_QUAD_14SEG },
+ { "ht16k33", DISP_MATRIX },
{ }
};
MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
@@ -792,5 +780,5 @@ module_i2c_driver(ht16k33_driver);
MODULE_DESCRIPTION("Holtek HT16K33 driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
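
The ht16k33 backlight conversion replaces the open-coded props checks with backlight_get_brightness(), which already returns 0 when the display is blanked or powered off; that in turn makes the check_fb hook unnecessary. A sketch of the resulting update_status shape; example_set_brightness is a hypothetical driver helper:

static int example_bl_update_status(struct backlight_device *bl)
{
        /* returns props.brightness, or 0 if blanked or powered off */
        const int brightness = backlight_get_brightness(bl);

        return example_set_brightness(bl_get_data(bl), brightness);
}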
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 9ba132dc6143..32e1863ef4b2 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -36,7 +36,6 @@ struct img_ascii_lcd_config {
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
- * @cfg: pointer to the LCD model configuration
*/
struct img_ascii_lcd_ctx {
struct linedisp linedisp;
@@ -45,7 +44,6 @@ struct img_ascii_lcd_ctx {
struct regmap *regmap;
};
u32 offset;
- const struct img_ascii_lcd_config *cfg;
};
/*
@@ -71,7 +69,7 @@ static void boston_update(struct linedisp *linedisp)
#endif
}
-static struct img_ascii_lcd_config boston_config = {
+static const struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
.ops = {
.update = boston_update,
@@ -100,7 +98,7 @@ static void malta_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config malta_config = {
+static const struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
.ops = {
@@ -202,7 +200,7 @@ static void sead3_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config sead3_config = {
+static const struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
.ops = {
@@ -291,11 +289,11 @@ static struct platform_driver img_ascii_lcd_driver = {
.of_match_table = img_ascii_lcd_matches,
},
.probe = img_ascii_lcd_probe,
- .remove_new = img_ascii_lcd_remove,
+ .remove = img_ascii_lcd_remove,
};
module_platform_driver(img_ascii_lcd_driver);
MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 234f9dbe6e30..51587f0fdaae 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -162,7 +162,6 @@ static struct parport_driver ks0108_parport_driver = {
.name = "ks0108",
.match_port = ks0108_parport_attach,
.detach = ks0108_parport_detach,
- .devmodel = true,
};
module_parport_driver(ks0108_parport_driver);
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 6422be0dfe20..045dbef49dee 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -298,20 +298,18 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
- lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
- if (!lcd2s)
- return -ENOMEM;
-
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
return err;
- lcd = charlcd_alloc();
+ lcd = charlcd_alloc(sizeof(*lcd2s));
if (!lcd)
return -ENOMEM;
- lcd->drvdata = lcd2s;
+ lcd->ops = &lcd2s_ops;
+
+ lcd2s = lcd->drvdata;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -326,8 +324,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
if (err)
goto fail1;
- lcd->ops = &lcd2s_ops;
-
err = charlcd_register(lcd2s->charlcd);
if (err)
goto fail1;
@@ -349,7 +345,7 @@ static void lcd2s_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id lcd2s_i2c_id[] = {
- { "lcd2s", 0 },
+ { "lcd2s" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id);
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index e2b546210f8d..4e22373fcc1a 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -6,18 +6,23 @@
* Author: Paul Burton <paul.burton@mips.com>
*
* Copyright (C) 2021 Glider bv
+ * Copyright (C) 2025 Jean-François Lessard
*/
+#ifndef CONFIG_PANEL_BOOT_MESSAGE
#include <generated/utsrelease.h>
+#endif
-#include <linux/container_of.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
@@ -30,6 +35,87 @@
#define DEFAULT_SCROLL_RATE (HZ / 2)
/**
+ * struct linedisp_attachment - Holds the device to linedisp mapping
+ * @list: List entry for the linedisp_attachments list
+ * @device: Pointer to the device where linedisp attributes are added
+ * @linedisp: Pointer to the linedisp mapped to the device
+ * @direct: true for directly attached device using linedisp_attach(),
+ * false for child registered device using linedisp_register()
+ */
+struct linedisp_attachment {
+ struct list_head list;
+ struct device *device;
+ struct linedisp *linedisp;
+ bool direct;
+};
+
+static LIST_HEAD(linedisp_attachments);
+static DEFINE_SPINLOCK(linedisp_attachments_lock);
+
+static int create_attachment(struct device *dev, struct linedisp *linedisp, bool direct)
+{
+ struct linedisp_attachment *attachment;
+
+ attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+ if (!attachment)
+ return -ENOMEM;
+
+ attachment->device = dev;
+ attachment->linedisp = linedisp;
+ attachment->direct = direct;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+ list_add(&attachment->list, &linedisp_attachments);
+
+ return 0;
+}
+
+static struct linedisp *delete_attachment(struct device *dev, bool direct)
+{
+ struct linedisp_attachment *attachment;
+ struct linedisp *linedisp;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+
+ list_for_each_entry(attachment, &linedisp_attachments, list) {
+ if (attachment->device == dev &&
+ attachment->direct == direct)
+ break;
+ }
+
+ if (list_entry_is_head(attachment, &linedisp_attachments, list))
+ return NULL;
+
+ linedisp = attachment->linedisp;
+ list_del(&attachment->list);
+ kfree(attachment);
+
+ return linedisp;
+}
+
+static struct linedisp *to_linedisp(struct device *dev)
+{
+ struct linedisp_attachment *attachment;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+
+ list_for_each_entry(attachment, &linedisp_attachments, list) {
+ if (attachment->device == dev)
+ break;
+ }
+
+ if (list_entry_is_head(attachment, &linedisp_attachments, list))
+ return NULL;
+
+ return attachment->linedisp;
+}
+
+static inline bool should_scroll(struct linedisp *linedisp)
+{
+ return linedisp->message_len > linedisp->num_chars && linedisp->scroll_rate;
+}
+
+/**
* linedisp_scroll() - scroll the display by a character
* @t: really a pointer to the private data structure
*
@@ -38,7 +124,7 @@
*/
static void linedisp_scroll(struct timer_list *t)
{
- struct linedisp *linedisp = from_timer(linedisp, t, timer);
+ struct linedisp *linedisp = timer_container_of(linedisp, t, timer);
unsigned int i, ch = linedisp->scroll_pos;
unsigned int num_chars = linedisp->num_chars;
@@ -60,8 +146,7 @@ static void linedisp_scroll(struct timer_list *t)
linedisp->scroll_pos %= linedisp->message_len;
/* rearm the timer */
- if (linedisp->message_len > num_chars && linedisp->scroll_rate)
- mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate);
+ mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate);
}
/**
@@ -82,7 +167,7 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
char *new_msg;
/* stop the scroll timer */
- del_timer_sync(&linedisp->timer);
+ timer_delete_sync(&linedisp->timer);
if (count == -1)
count = strlen(msg);
@@ -111,8 +196,16 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
linedisp->message_len = count;
linedisp->scroll_pos = 0;
- /* update the display */
- linedisp_scroll(&linedisp->timer);
+ if (should_scroll(linedisp)) {
+ /* display scrolling message */
+ linedisp_scroll(&linedisp->timer);
+ } else {
+ /* display static message */
+ memset(linedisp->buf, ' ', linedisp->num_chars);
+ memcpy(linedisp->buf, linedisp->message,
+ umin(linedisp->num_chars, linedisp->message_len));
+ linedisp->ops->update(linedisp);
+ }
return 0;
}
@@ -131,7 +224,7 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
static ssize_t message_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
return sysfs_emit(buf, "%s\n", linedisp->message);
}
@@ -150,7 +243,7 @@ static ssize_t message_show(struct device *dev, struct device_attribute *attr,
static ssize_t message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
int err;
err = linedisp_display(linedisp, buf, count);
@@ -159,10 +252,20 @@ static ssize_t message_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(message);
+static ssize_t num_chars_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct linedisp *linedisp = to_linedisp(dev);
+
+ return sysfs_emit(buf, "%u\n", linedisp->num_chars);
+}
+
+static DEVICE_ATTR_RO(num_chars);
+
static ssize_t scroll_step_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
return sysfs_emit(buf, "%u\n", jiffies_to_msecs(linedisp->scroll_rate));
}
@@ -171,7 +274,7 @@ static ssize_t scroll_step_ms_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
unsigned int ms;
int err;
@@ -179,12 +282,12 @@ static ssize_t scroll_step_ms_store(struct device *dev,
if (err)
return err;
+ timer_delete_sync(&linedisp->timer);
+
linedisp->scroll_rate = msecs_to_jiffies(ms);
- if (linedisp->message && linedisp->message_len > linedisp->num_chars) {
- del_timer_sync(&linedisp->timer);
- if (linedisp->scroll_rate)
- linedisp_scroll(&linedisp->timer);
- }
+
+ if (should_scroll(linedisp))
+ linedisp_scroll(&linedisp->timer);
return count;
}
@@ -193,7 +296,7 @@ static DEVICE_ATTR_RW(scroll_step_ms);
static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
memcpy(buf, &map->map, map->size);
@@ -203,7 +306,7 @@ static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, c
static ssize_t map_seg_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
if (count != map->size)
@@ -221,6 +324,7 @@ static DEVICE_ATTR(map_seg14, 0644, map_seg_show, map_seg_store);
static struct attribute *linedisp_attrs[] = {
&dev_attr_message.attr,
+ &dev_attr_num_chars.attr,
&dev_attr_scroll_step_ms.attr,
&dev_attr_map_seg7.attr,
&dev_attr_map_seg14.attr,
@@ -230,7 +334,7 @@ static struct attribute *linedisp_attrs[] = {
static umode_t linedisp_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
umode_t mode = attr->mode;
@@ -261,7 +365,7 @@ static DEFINE_IDA(linedisp_id);
static void linedisp_release(struct device *dev)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
kfree(linedisp->map);
kfree(linedisp->message);
@@ -312,6 +416,96 @@ static int linedisp_init_map(struct linedisp *linedisp)
return 0;
}
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+#define LINEDISP_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
+#else
+#define LINEDISP_INIT_TEXT "Linux " UTS_RELEASE " "
+#endif
+
+/**
+ * linedisp_attach - attach a character line display
+ * @linedisp: pointer to character line display structure
+ * @dev: pointer to the device to attach to
+ * @num_chars: the number of characters that can be displayed
+ * @ops: character line display operations
+ *
+ * Directly attach the line-display sysfs attributes to the passed device.
+ * The caller is responsible for calling linedisp_detach() to release resources
+ * after use.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int linedisp_attach(struct linedisp *linedisp, struct device *dev,
+ unsigned int num_chars, const struct linedisp_ops *ops)
+{
+ int err;
+
+ memset(linedisp, 0, sizeof(*linedisp));
+ linedisp->ops = ops;
+ linedisp->num_chars = num_chars;
+ linedisp->scroll_rate = DEFAULT_SCROLL_RATE;
+
+ linedisp->buf = kzalloc(linedisp->num_chars, GFP_KERNEL);
+ if (!linedisp->buf)
+ return -ENOMEM;
+
+ /* initialise a character mapping, if required */
+ err = linedisp_init_map(linedisp);
+ if (err)
+ goto out_free_buf;
+
+ /* initialise a timer for scrolling the message */
+ timer_setup(&linedisp->timer, linedisp_scroll, 0);
+
+ err = create_attachment(dev, linedisp, true);
+ if (err)
+ goto out_del_timer;
+
+ /* display a default message */
+ err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
+ if (err)
+ goto out_del_attach;
+
+ /* add attribute groups to target device */
+ err = device_add_groups(dev, linedisp_groups);
+ if (err)
+ goto out_del_attach;
+
+ return 0;
+
+out_del_attach:
+ delete_attachment(dev, true);
+out_del_timer:
+ timer_delete_sync(&linedisp->timer);
+out_free_buf:
+ kfree(linedisp->buf);
+ return err;
+}
+EXPORT_SYMBOL_NS_GPL(linedisp_attach, "LINEDISP");
+
+/**
+ * linedisp_detach - detach a character line display
+ * @dev: pointer to the device to detach from; it must previously have been
+ *	 attached with linedisp_attach()
+ */
+void linedisp_detach(struct device *dev)
+{
+ struct linedisp *linedisp;
+
+ linedisp = delete_attachment(dev, true);
+ if (!linedisp)
+ return;
+
+ timer_delete_sync(&linedisp->timer);
+
+ device_remove_groups(dev, linedisp_groups);
+
+ kfree(linedisp->map);
+ kfree(linedisp->message);
+ kfree(linedisp->buf);
+}
+EXPORT_SYMBOL_NS_GPL(linedisp_detach, "LINEDISP");
+
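
For illustration, a minimal sketch (hypothetical driver, not part of this
patch) of the new attach API. Unlike linedisp_register(), which creates a
child "linedisp.N" device, linedisp_attach() places the message/num_chars/
scroll_step_ms attributes directly on the caller's own device; my_update,
my_priv and the display width of 8 are assumptions:

	static const struct linedisp_ops my_ops = {
		.update = my_update,	/* assumed helper flushing linedisp->buf to hardware */
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_priv *priv;	/* assumed to embed a struct linedisp */

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* the sysfs attributes appear on &pdev->dev itself */
		return linedisp_attach(&priv->linedisp, &pdev->dev, 8, &my_ops);
	}

	static void my_remove(struct platform_device *pdev)
	{
		linedisp_detach(&pdev->dev);
	}

	MODULE_IMPORT_NS("LINEDISP");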
/**
* linedisp_register - register a character line display
* @linedisp: pointer to character line display structure
@@ -319,6 +513,11 @@ static int linedisp_init_map(struct linedisp *linedisp)
* @num_chars: the number of characters that can be displayed
* @ops: character line display operations
*
+ * Register the line-display sysfs attributes on a new device named
+ * "linedisp.N", added below the passed parent device.
+ * The caller is responsible for calling linedisp_unregister() to release
+ * resources after use.
+ *
* Return: zero on success, else a negative error code.
*/
int linedisp_register(struct linedisp *linedisp, struct device *parent,
@@ -354,26 +553,30 @@ int linedisp_register(struct linedisp *linedisp, struct device *parent,
/* initialise a timer for scrolling the message */
timer_setup(&linedisp->timer, linedisp_scroll, 0);
- err = device_add(&linedisp->dev);
+ err = create_attachment(&linedisp->dev, linedisp, false);
if (err)
goto out_del_timer;
/* display a default message */
- err = linedisp_display(linedisp, "Linux " UTS_RELEASE " ", -1);
+ err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
+ if (err)
+ goto out_del_attach;
+
+ err = device_add(&linedisp->dev);
if (err)
- goto out_del_dev;
+ goto out_del_attach;
return 0;
-out_del_dev:
- device_del(&linedisp->dev);
+out_del_attach:
+ delete_attachment(&linedisp->dev, false);
out_del_timer:
- del_timer_sync(&linedisp->timer);
+ timer_delete_sync(&linedisp->timer);
out_put_device:
put_device(&linedisp->dev);
return err;
}
-EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
+EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");
/**
* linedisp_unregister - unregister a character line display
@@ -383,9 +586,11 @@ EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
void linedisp_unregister(struct linedisp *linedisp)
{
device_del(&linedisp->dev);
- del_timer_sync(&linedisp->timer);
+ delete_attachment(&linedisp->dev, false);
+ timer_delete_sync(&linedisp->timer);
put_device(&linedisp->dev);
}
-EXPORT_SYMBOL_NS_GPL(linedisp_unregister, LINEDISP);
+EXPORT_SYMBOL_NS_GPL(linedisp_unregister, "LINEDISP");
+MODULE_DESCRIPTION("Character line display core support");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/line-display.h b/drivers/auxdisplay/line-display.h
index 4348d7a2f69a..36853b639711 100644
--- a/drivers/auxdisplay/line-display.h
+++ b/drivers/auxdisplay/line-display.h
@@ -6,6 +6,7 @@
* Author: Paul Burton <paul.burton@mips.com>
*
* Copyright (C) 2021 Glider bv
+ * Copyright (C) 2025 Jean-François Lessard
*/
#ifndef _LINEDISP_H
@@ -81,6 +82,9 @@ struct linedisp {
unsigned int id;
};
+int linedisp_attach(struct linedisp *linedisp, struct device *dev,
+ unsigned int num_chars, const struct linedisp_ops *ops);
+void linedisp_detach(struct device *dev);
int linedisp_register(struct linedisp *linedisp, struct device *parent,
unsigned int num_chars, const struct linedisp_ops *ops);
void linedisp_unregister(struct linedisp *linedisp);
diff --git a/drivers/auxdisplay/max6959.c b/drivers/auxdisplay/max6959.c
index 5519c014bd29..962488197b9e 100644
--- a/drivers/auxdisplay/max6959.c
+++ b/drivers/auxdisplay/max6959.c
@@ -191,4 +191,4 @@ module_i2c_driver(max6959_i2c_driver);
MODULE_DESCRIPTION("MAX6958/6959 7-segment LED controller");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
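
This is the consumer side of the tree-wide symbol-namespace conversion: the
namespace macros now take a string literal rather than a bare identifier, and
exporter and importer must agree on the quoted string. A minimal sketch of
the pairing, reusing names from this series:

	/* provider module */
	EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");

	/* each consumer module; modpost complains if the import is missing */
	MODULE_IMPORT_NS("LINEDISP");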
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 049ff443e790..958c0e31e84a 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -831,18 +831,12 @@ static void lcd_init(void)
struct charlcd *charlcd;
struct hd44780_common *hdc;
- hdc = hd44780_common_alloc();
- if (!hdc)
+ charlcd = hd44780_common_alloc();
+ if (!charlcd)
return;
- charlcd = charlcd_alloc();
- if (!charlcd) {
- kfree(hdc);
- return;
- }
-
+ hdc = charlcd->drvdata;
hdc->hd44780 = &lcd;
- charlcd->drvdata = hdc;
/*
* Init lcd struct with load-time values to preserve exact
@@ -1660,11 +1654,11 @@ static void panel_attach(struct parport *port)
err_lcd_unreg:
if (scan_timer.function)
- del_timer_sync(&scan_timer);
+ timer_delete_sync(&scan_timer);
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@@ -1681,7 +1675,7 @@ static void panel_detach(struct parport *port)
return;
}
if (scan_timer.function)
- del_timer_sync(&scan_timer);
+ timer_delete_sync(&scan_timer);
if (keypad.enabled) {
misc_deregister(&keypad_dev);
@@ -1691,8 +1685,7 @@ static void panel_detach(struct parport *port)
if (lcd.enabled) {
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
- kfree(lcd.charlcd->drvdata);
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
}
@@ -1706,7 +1699,6 @@ static struct parport_driver panel_driver = {
.name = "panel",
.match_port = panel_attach,
.detach = panel_detach,
- .devmodel = true,
};
module_parport_driver(panel_driver);
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index 183ab3011cbb..dfb62e9ce9b4 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -36,8 +36,7 @@ static void seg_led_update(struct work_struct *work)
bitmap_set_value8(values, map_to_seg7(&map->map.seg7, linedisp->buf[0]), 0);
- gpiod_set_array_value_cansleep(priv->segment_gpios->ndescs, priv->segment_gpios->desc,
- priv->segment_gpios->info, values);
+ gpiod_multi_set_value_cansleep(priv->segment_gpios, values);
}
static int seg_led_linedisp_get_map_type(struct linedisp *linedisp)
@@ -97,7 +96,7 @@ MODULE_DEVICE_TABLE(of, seg_led_of_match);
static struct platform_driver seg_led_driver = {
.probe = seg_led_probe,
- .remove_new = seg_led_remove,
+ .remove = seg_led_remove,
.driver = {
.name = "seg-led-gpio",
.of_match_table = seg_led_of_match,
@@ -108,4 +107,4 @@ module_platform_driver(seg_led_driver);
MODULE_AUTHOR("Chris Packham <chris.packham@alliedtelesis.co.nz>");
MODULE_DESCRIPTION("7 segment LED driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
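
The .remove_new to .remove rename completes the platform bus conversion to a
void-returning remove callback (.remove_new was the transitional name). A
sketch of the resulting shape, with the body elided:

	static void seg_led_remove(struct platform_device *pdev)
	{
		/* teardown only; no error code can be handed back to the bus */
	}

	static struct platform_driver seg_led_driver = {
		.probe	= seg_led_probe,
		.remove	= seg_led_remove,	/* was .remove_new during the transition */
		/* .driver omitted */
	};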
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2b8fd6bb7da0..1786d87b29e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -167,6 +167,12 @@ config PM_QOS_KUNIT_TEST
depends on KUNIT=y
default KUNIT_ALL_TESTS
+config PM_RUNTIME_KUNIT_TEST
+ tristate "KUnit Tests for runtime PM" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on PM
+ default KUNIT_ALL_TESTS
+
config HMEM_REPORTING
bool
default n
@@ -226,6 +232,7 @@ config GENERIC_ARCH_TOPOLOGY
config GENERIC_ARCH_NUMA
bool
+ select NUMA_MEMBLKS
help
Enable support for generic NUMA implementation. Currently, RISC-V
and ARM64 use it.
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 3079bfe53d04..8074a10183dc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- swnode.o
+ swnode.o faux.o
obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
@@ -16,6 +16,7 @@ obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_AUXILIARY_BUS) += auxiliary_sysfs.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 5b59d133b6af..c99f2ab105e5 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -12,16 +12,12 @@
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/numa_memblks.h>
#include <asm/sections.h>
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-nodemask_t numa_nodes_parsed __initdata;
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
-static int numa_distance_cnt;
-static u8 *numa_distance;
bool numa_off;
static __init int numa_parse_early_param(char *opt)
@@ -30,6 +26,8 @@ static __init int numa_parse_early_param(char *opt)
return -EINVAL;
if (str_has_prefix(opt, "off"))
numa_off = true;
+ if (!strncmp(opt, "fake=", 5))
+ return numa_emu_cmdline(opt + 5);
return 0;
}
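
With the "fake=" prefix forwarded to numa_emu_cmdline(), NUMA emulation
becomes reachable from the command line on the architectures served by this
file. Assumed usage, following the existing x86 behaviour of the option:

	numa=fake=4	# split memory into four equally sized emulated nodes
	numa=fake=2G	# size form (assumed, as on x86): nodes of 2 GiB each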
@@ -61,6 +59,7 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif
+#ifndef CONFIG_NUMA_EMU
static void numa_update_cpu(unsigned int cpu, bool remove)
{
int nid = cpu_to_node(cpu);
@@ -83,6 +82,7 @@ void numa_remove_cpu(unsigned int cpu)
{
numa_update_cpu(cpu, true);
}
+#endif
void numa_clear_node(unsigned int cpu)
{
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-int __init early_cpu_to_node(int cpu)
+int early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
@@ -189,174 +189,28 @@ void __init setup_per_cpu_areas(void)
}
#endif
-/**
- * numa_add_memblk() - Set node id to memblk
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- int ret;
-
- ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
- if (ret < 0) {
- pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (end - 1), nid);
- return ret;
- }
-
- node_set(nid, numa_nodes_parsed);
- return ret;
-}
-
/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
+ alloc_node_data(nid);
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
-/*
- * numa_free_distance
- *
- * The current table is freed.
- */
-void __init numa_free_distance(void)
-{
- size_t size;
-
- if (!numa_distance)
- return;
-
- size = numa_distance_cnt * numa_distance_cnt *
- sizeof(numa_distance[0]);
-
- memblock_free(numa_distance, size);
- numa_distance_cnt = 0;
- numa_distance = NULL;
-}
-
-/*
- * Create a new NUMA distance table.
- */
-static int __init numa_alloc_distance(void)
-{
- size_t size;
- int i, j;
-
- size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
- numa_distance = memblock_alloc(size, PAGE_SIZE);
- if (WARN_ON(!numa_distance))
- return -ENOMEM;
-
- numa_distance_cnt = nr_node_ids;
-
- /* fill with the default distances */
- for (i = 0; i < numa_distance_cnt; i++)
- for (j = 0; j < numa_distance_cnt; j++)
- numa_distance[i * numa_distance_cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
-
- pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance() - Set inter node NUMA distance from node to node.
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance.
- * If distance table doesn't exist, a warning is printed.
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * or @distance doesn't make sense, the call is ignored.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance) {
- pr_warn_once("Warning: distance table not allocated yet\n");
- return;
- }
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-/*
- * Return NUMA distance @from to @to
- */
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
static int __init numa_register_nodes(void)
{
int nid;
- struct memblock_region *mblk;
-
- /* Check that valid nid is set to memblks */
- for_each_mem_region(mblk) {
- int mblk_nid = memblock_get_region_node(mblk);
- phys_addr_t start = mblk->base;
- phys_addr_t end = mblk->base + mblk->size - 1;
-
- if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
- mblk_nid, &start, &end);
- return -EINVAL;
- }
- }
+
+ /* Check the validity of the memblock/node mapping */
+ if (!memblock_validate_numa_coverage(0))
+ return -EINVAL;
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
@@ -381,11 +235,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- ret = numa_alloc_distance();
- if (ret < 0)
- return ret;
-
- ret = init_func();
+ ret = numa_memblks_init(init_func, /* memblock_force_top_down */ false);
if (ret < 0)
goto out_free_distance;
@@ -403,7 +253,7 @@ static int __init numa_init(int (*init_func)(void))
return 0;
out_free_distance:
- numa_free_distance();
+ numa_reset_distance();
return ret;
}
@@ -433,6 +283,7 @@ static int __init dummy_numa_init(void)
pr_err("NUMA init failed\n");
return ret;
}
+ node_set(0, numa_nodes_parsed);
numa_off = true;
return 0;
@@ -445,7 +296,7 @@ static int __init arch_acpi_numa_init(void)
ret = acpi_numa_init();
if (ret) {
- pr_info("Failed to initialise from firmware\n");
+ pr_debug("Failed to initialise from firmware\n");
return ret;
}
@@ -475,3 +326,54 @@ void __init arch_numa_init(void)
numa_init(dummy_numa_init);
}
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids)
+{
+ int i, j;
+
+ /*
+ * Transform cpu_to_node_map table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
+ */
+ for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) {
+ if (cpu_to_node_map[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < nr_emu_nids; j++)
+ if (cpu_to_node_map[i] == emu_nid_to_phys[j])
+ break;
+ cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;
+ }
+}
+
+u64 __init numa_emu_dma_end(void)
+{
+ return memblock_start_of_DRAM() + SZ_4G;
+}
+
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
+{
+ struct cpumask *mask;
+
+ if (node == NUMA_NO_NODE)
+ return;
+
+ mask = node_to_cpumask_map[node];
+ if (!cpumask_available(mask)) {
+ pr_err("node_to_cpumask_map[%i] NULL\n", node);
+ dump_stack();
+ return;
+ }
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
+ pr_debug("%s cpu %d node %d: mask now %*pbl\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu",
+ cpu, node, cpumask_pr_args(mask));
+}
+#endif /* CONFIG_NUMA_EMU */
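
A worked example of the reverse mapping in numa_emu_update_cpu_to_node(),
with assumed values:

	/* emu_nid_to_phys[] = {0, 0, 1, 1}: four emulated nodes backed by two
	 * physical ones. A CPU whose cpu_to_node_map[] entry is physical node
	 * 1 first matches at j == 2, so it is rewritten to emulated node 2;
	 * an entry matching no emulated node falls back to node 0.
	 */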
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index c66d070207a0..84ec92bff642 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -8,8 +8,10 @@
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
+#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
@@ -27,7 +29,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
@@ -152,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
per_cpu(arch_freq_scale, i) = scale;
}
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
- per_cpu(cpu_scale, cpu) = capacity;
-}
-
DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
@@ -205,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
-static ssize_t cpu_capacity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
- cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
- return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
static int update_topology;
int topology_update_cpu_topology(void)
@@ -292,13 +242,15 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
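
A worked example of the normalization with the new zero-guard, using assumed
numbers (SCHED_CAPACITY_SHIFT is 10):

	/* raw_capacity = {1024, 512}, capacity_freq_ref = {2000, 1000} kHz
	 *   products:       1024 * 2000 = 2048000,  512 * 1000 = 512000
	 *   capacity_scale: 2048000
	 *   cpu0: (2048000 << 10) / 2048000 = 1024
	 *   cpu1: ( 512000 << 10) / 2048000 =  256
	 * With "?: 1", a CPU whose capacity_freq_ref is still 0 (now the
	 * initial per-CPU value) contributes raw_capacity * 1 instead of a
	 * zero product.
	 */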
@@ -340,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
@@ -365,7 +317,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
-void topology_init_cpu_capacity_cppc(void)
+static inline void topology_init_cpu_capacity_cppc(void)
{
u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
@@ -416,6 +368,10 @@ void topology_init_cpu_capacity_cppc(void)
exit:
free_raw_capacity();
}
+void acpi_processor_init_invariance_cppc(void)
+{
+ topology_init_cpu_capacity_cppc();
+}
#endif
#ifdef CONFIG_CPU_FREQ
@@ -501,6 +457,10 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable the SMT control */
+static unsigned int max_smt_thread_num = 1;
+
/*
* This function returns the logic cpu number of the node.
* There are basically three kinds of return values:
@@ -513,10 +473,10 @@ core_initcall(free_raw_capacity);
*/
static int __init get_cpu_for_node(struct device_node *node)
{
- struct device_node *cpu_node;
int cpu;
+ struct device_node *cpu_node __free(device_node) =
+ of_parse_phandle(node, "cpu", 0);
- cpu_node = of_parse_phandle(node, "cpu", 0);
if (!cpu_node)
return -1;
@@ -527,7 +487,6 @@ static int __init get_cpu_for_node(struct device_node *node)
pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
cpu_node, cpumask_pr_args(cpu_possible_mask));
- of_node_put(cpu_node);
return cpu;
}
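
These conversions rely on the scope-based cleanup support from
<linux/cleanup.h>: the __free(device_node) annotation arranges for
of_node_put() to run automatically when the pointer goes out of scope, so
early returns no longer need explicit put calls. A self-contained sketch
(the "core0" child name is an assumption):

	static int example(struct device_node *parent)
	{
		struct device_node *child __free(device_node) =
			of_get_child_by_name(parent, "core0");

		if (!child)
			return -ENODEV;

		return 0;	/* of_node_put(child) runs here automatically */
	}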
@@ -538,28 +497,30 @@ static int __init parse_core(struct device_node *core, int package_id,
bool leaf = true;
int i = 0;
int cpu;
- struct device_node *t;
do {
snprintf(name, sizeof(name), "thread%d", i);
- t = of_get_child_by_name(core, name);
- if (t) {
- leaf = false;
- cpu = get_cpu_for_node(t);
- if (cpu >= 0) {
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].cluster_id = cluster_id;
- cpu_topology[cpu].core_id = core_id;
- cpu_topology[cpu].thread_id = i;
- } else if (cpu != -ENODEV) {
- pr_err("%pOF: Can't get CPU for thread\n", t);
- of_node_put(t);
- return -EINVAL;
- }
- of_node_put(t);
+ struct device_node *t __free(device_node) =
+ of_get_child_by_name(core, name);
+
+ if (!t)
+ break;
+
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else if (cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for thread\n", t);
+ return -EINVAL;
}
i++;
- } while (t);
+ } while (1);
+
+ max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
@@ -586,7 +547,6 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
char name[20];
bool leaf = true;
bool has_cores = false;
- struct device_node *c;
int core_id = 0;
int i, ret;
@@ -598,49 +558,50 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
i = 0;
do {
snprintf(name, sizeof(name), "cluster%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- leaf = false;
- ret = parse_cluster(c, package_id, i, depth + 1);
- if (depth > 0)
- pr_warn("Topology for clusters of clusters not yet supported\n");
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(cluster, name);
+
+ if (!c)
+ break;
+
+ leaf = false;
+ ret = parse_cluster(c, package_id, i, depth + 1);
+ if (depth > 0)
+ pr_warn("Topology for clusters of clusters not yet supported\n");
+ if (ret != 0)
+ return ret;
i++;
- } while (c);
+ } while (1);
/* Now check for cores */
i = 0;
do {
snprintf(name, sizeof(name), "core%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- has_cores = true;
-
- if (depth == 0) {
- pr_err("%pOF: cpu-map children should be clusters\n",
- c);
- of_node_put(c);
- return -EINVAL;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(cluster, name);
- if (leaf) {
- ret = parse_core(c, package_id, cluster_id,
- core_id++);
- } else {
- pr_err("%pOF: Non-leaf cluster with core %s\n",
- cluster, name);
- ret = -EINVAL;
- }
+ if (!c)
+ break;
- of_node_put(c);
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%pOF: cpu-map children should be clusters\n", c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, package_id, cluster_id, core_id++);
if (ret != 0)
return ret;
+ } else {
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
+ return -EINVAL;
}
+
i++;
- } while (c);
+ } while (1);
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
@@ -651,36 +612,49 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
static int __init parse_socket(struct device_node *socket)
{
char name[20];
- struct device_node *c;
bool has_socket = false;
int package_id = 0, ret;
do {
snprintf(name, sizeof(name), "socket%d", package_id);
- c = of_get_child_by_name(socket, name);
- if (c) {
- has_socket = true;
- ret = parse_cluster(c, package_id, -1, 0);
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(socket, name);
+
+ if (!c)
+ break;
+
+ has_socket = true;
+ ret = parse_cluster(c, package_id, -1, 0);
+ if (ret != 0)
+ return ret;
+
package_id++;
- } while (c);
+ } while (1);
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
+ /*
+	 * Reset max_smt_thread_num to 1 on failure: the framework must be
+	 * told that SMT is not supported, but max_smt_thread_num may already
+	 * have been raised to the SMT thread count of cores that were parsed
+	 * successfully before the failure.
+ */
+ if (ret)
+ max_smt_thread_num = 1;
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
return ret;
}
static int __init parse_dt_topology(void)
{
- struct device_node *cn, *map;
int ret = 0;
int cpu;
+ struct device_node *cn __free(device_node) =
+ of_find_node_by_path("/cpus");
- cn = of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n");
return 0;
@@ -690,13 +664,15 @@ static int __init parse_dt_topology(void)
* When topology is provided cpu-map is essentially a root
* cluster with restricted subnodes.
*/
- map = of_get_child_by_name(cn, "cpu-map");
+ struct device_node *map __free(device_node) =
+ of_get_child_by_name(cn, "cpu-map");
+
if (!map)
- goto out;
+ return ret;
ret = parse_socket(map);
if (ret != 0)
- goto out_map;
+ return ret;
topology_normalize_cpu_scale();
@@ -706,14 +682,9 @@ static int __init parse_dt_topology(void)
*/
for_each_possible_cpu(cpu)
if (cpu_topology[cpu].package_id < 0) {
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out_map:
- of_node_put(map);
-out:
- of_node_put(cn);
return ret;
}
#endif
@@ -852,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
clear_cpu_topology(cpu);
}
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+ * if the PPTT doesn't have thread information, check for architecture
+ * specific fallback if available
+ */
+ if (is_threaded < 0)
+ is_threaded = arch_cpu_is_threaded();
+
+ return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
__weak int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ xa_init(&hetero_cpu);
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_cluster(cpu);
+ cpu_topology[cpu].cluster_id = topology_id;
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+ }
+
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 01ef796c2055..b6f941a6ab69 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -346,8 +346,7 @@ attribute_container_device_trigger_safe(struct device *dev,
* @fn: the function to execute for each classdev.
*
* This function is for executing a trigger when you need to know both
- * the container and the classdev. If you only care about the
- * container, then use attribute_container_trigger() instead.
+ * the container and the classdev.
*/
void
attribute_container_device_trigger(struct device *dev,
@@ -379,33 +378,6 @@ attribute_container_device_trigger(struct device *dev,
}
/**
- * attribute_container_trigger - trigger a function for each matching container
- *
- * @dev: The generic device to activate the trigger for
- * @fn: the function to trigger
- *
- * This routine triggers a function that only needs to know the
- * matching containers (not the classdev) associated with a device.
- * It is more lightweight than attribute_container_device_trigger, so
- * should be used in preference unless the triggering function
- * actually needs to know the classdev.
- */
-void
-attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- if (cont->match(cont, dev))
- fn(cont, dev);
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
* attribute_container_add_attrs - add attributes
*
* @classdev: The class device
@@ -459,24 +431,6 @@ attribute_container_add_class_device(struct device *classdev)
}
/**
- * attribute_container_add_class_device_adapter - simple adapter for triggers
- *
- * @cont: the container to register.
- * @dev: the generic device to activate the trigger for
- * @classdev: the class device to add
- *
- * This function is identical to attribute_container_add_class_device except
- * that it is designed to be called from the triggers
- */
-int
-attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- return attribute_container_add_class_device(classdev);
-}
-
-/**
* attribute_container_remove_attrs - remove any attribute files
*
* @classdev: The class device to remove the files from
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index d3a2c40c2f12..04bdbff4dbe5 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -92,7 +92,7 @@
* Auxiliary devices are created and registered by a subsystem-level core
* device that needs to break up its functionality into smaller fragments. One
* way to extend the scope of an auxiliary_device is to encapsulate it within a
- * domain- pecific structure defined by the parent device. This structure
+ * domain-specific structure defined by the parent device. This structure
* contains the auxiliary_device and any associated shared data/callbacks
* needed to establish the connection with the parent.
*
@@ -156,31 +156,42 @@
* },
* .ops = my_custom_ops,
* };
+ *
+ * Please note that such a custom ops approach is valid, but it is hard to
+ * implement correctly without a global per-device lock to protect against
+ * auxiliary_drv removal while the ops are being called. In addition, it
+ * lacks a proper module dependency, which leads to load/unload races between
+ * the auxiliary parent and device modules.
+ *
+ * The easiest way to provide these ops reliably, without needing a lock, is
+ * to EXPORT_SYMBOL*() them and rely on the existing module infrastructure
+ * for validity and correct dependency chains.
*/
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
- for (; id->name[0]; id++) {
- const char *p = strrchr(dev_name(&auxdev->dev), '.');
- int match_size;
+ const char *auxdev_name = dev_name(&auxdev->dev);
+ const char *p = strrchr(auxdev_name, '.');
+ int match_size;
- if (!p)
- continue;
- match_size = p - dev_name(&auxdev->dev);
+ if (!p)
+ return NULL;
+ match_size = p - auxdev_name;
+ for (; id->name[0]; id++) {
/* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
if (strlen(id->name) == match_size &&
- !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ !strncmp(auxdev_name, id->name, match_size))
return id;
}
return NULL;
}
-static int auxiliary_match(struct device *dev, struct device_driver *drv)
+static int auxiliary_match(struct device *dev, const struct device_driver *drv)
{
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
return !!auxiliary_match_id(auxdrv->id_table, auxdev);
}
@@ -203,36 +214,32 @@ static const struct dev_pm_ops auxiliary_dev_pm_ops = {
static int auxiliary_bus_probe(struct device *dev)
{
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
- ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
- if (ret)
- dev_pm_domain_detach(dev, true);
-
- return ret;
+ return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
}
static void auxiliary_bus_remove(struct device *dev)
{
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
if (auxdrv->remove)
auxdrv->remove(auxdev);
- dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)
{
- struct auxiliary_driver *auxdrv = NULL;
+ const struct auxiliary_driver *auxdrv = NULL;
struct auxiliary_device *auxdev;
if (dev->driver) {
@@ -287,6 +294,7 @@ int auxiliary_device_init(struct auxiliary_device *auxdev)
dev->bus = &auxiliary_bus_type;
device_initialize(&auxdev->dev);
+ mutex_init(&auxdev->sysfs.lock);
return 0;
}
EXPORT_SYMBOL_GPL(auxiliary_device_init);
@@ -335,35 +343,6 @@ int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
EXPORT_SYMBOL_GPL(__auxiliary_device_add);
/**
- * auxiliary_find_device - auxiliary device iterator for locating a particular device.
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This function returns a reference to a device that is 'found'
- * for later use, as determined by the @match callback.
- *
- * The reference returned should be released with put_device().
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-struct auxiliary_device *auxiliary_find_device(struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data))
-{
- struct device *dev;
-
- dev = bus_find_device(&auxiliary_bus_type, start, data, match);
- if (!dev)
- return NULL;
-
- return to_auxiliary_dev(dev);
-}
-EXPORT_SYMBOL_GPL(auxiliary_find_device);
-
-/**
* __auxiliary_driver_register - register a driver for auxiliary bus devices
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
@@ -413,6 +392,116 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+static void auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ of_node_put(dev->of_node);
+ kfree(auxdev);
+}
+
+/**
+ * auxiliary_device_create - create a device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = kzalloc(sizeof(*auxdev), GFP_KERNEL);
+ if (!auxdev)
+ return NULL;
+
+ auxdev->id = id;
+ auxdev->name = devname;
+ auxdev->dev.parent = dev;
+ auxdev->dev.platform_data = platform_data;
+ auxdev->dev.release = auxiliary_device_release;
+ device_set_of_node_from_dev(&auxdev->dev, dev);
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret) {
+ of_node_put(auxdev->dev.of_node);
+ kfree(auxdev);
+ return NULL;
+ }
+
+ ret = __auxiliary_device_add(auxdev, modname);
+ if (ret) {
+ /*
+ * It may look odd but auxdev should not be freed here.
+		 * auxiliary_device_uninit() calls put_device(), which calls
+		 * the device release function, freeing auxdev.
+ */
+ auxiliary_device_uninit(auxdev);
+ return NULL;
+ }
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_create);
+
+/**
+ * auxiliary_device_destroy - remove an auxiliary device
+ * @auxdev: pointer to the auxdev to be removed
+ *
+ * Helper to remove an auxiliary device created with
+ * auxiliary_device_create()
+ */
+void auxiliary_device_destroy(void *auxdev)
+{
+ struct auxiliary_device *_auxdev = auxdev;
+
+ auxiliary_device_delete(_auxdev);
+ auxiliary_device_uninit(_auxdev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_destroy);
+
+/**
+ * __devm_auxiliary_device_create - create a managed device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Device managed helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = auxiliary_device_create(dev, modname, devname, platform_data, id);
+ if (!auxdev)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, auxiliary_device_destroy,
+ auxdev);
+ if (ret)
+ return NULL;
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(__devm_auxiliary_device_create);
+
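
A usage sketch (hypothetical parent driver) for the managed helper above:
the created device binds to an auxiliary driver named "my_mod.rdma", and
teardown runs automatically through devres on unbind. "my_mod", "rdma" and
the error code are assumptions:

	static int parent_probe(struct device *dev)
	{
		struct auxiliary_device *auxdev;

		auxdev = __devm_auxiliary_device_create(dev, "my_mod", "rdma",
							NULL, 0);
		if (!auxdev)
			return -ENODEV;

		return 0;	/* auxiliary_device_destroy() runs via devres */
	}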
void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
diff --git a/drivers/base/auxiliary_sysfs.c b/drivers/base/auxiliary_sysfs.c
new file mode 100644
index 000000000000..754f21730afd
--- /dev/null
+++ b/drivers/base/auxiliary_sysfs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/slab.h>
+
+#define AUXILIARY_MAX_IRQ_NAME 11
+
+struct auxiliary_irq_info {
+ struct device_attribute sysfs_attr;
+ char name[AUXILIARY_MAX_IRQ_NAME];
+};
+
+static struct attribute *auxiliary_irq_attrs[] = {
+ NULL
+};
+
+static const struct attribute_group auxiliary_irqs_group = {
+ .name = "irqs",
+ .attrs = auxiliary_irq_attrs,
+};
+
+static int auxiliary_irq_dir_prepare(struct auxiliary_device *auxdev)
+{
+ int ret = 0;
+
+ guard(mutex)(&auxdev->sysfs.lock);
+ if (auxdev->sysfs.irq_dir_exists)
+ return 0;
+
+ ret = devm_device_add_group(&auxdev->dev, &auxiliary_irqs_group);
+ if (ret)
+ return ret;
+
+ auxdev->sysfs.irq_dir_exists = true;
+ xa_init(&auxdev->sysfs.irqs);
+ return 0;
+}
+
+/**
+ * auxiliary_device_sysfs_irq_add - add a sysfs entry for the given IRQ
+ * @auxdev: auxiliary bus device to add the sysfs entry.
+ * @irq: The associated interrupt number.
+ *
+ * This function should be called after the auxiliary device has successfully
+ * received its IRQ.
+ * The driver is responsible for adding a unique IRQ for the auxiliary device.
+ * It may safely invoke this function from multiple thread contexts for
+ * distinct IRQs of its auxiliary devices, but it must not invoke this API
+ * again for an IRQ that has already been added.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ struct auxiliary_irq_info *info __free(kfree) = NULL;
+ struct device *dev = &auxdev->dev;
+ int ret;
+
+ ret = auxiliary_irq_dir_prepare(auxdev);
+ if (ret)
+ return ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sysfs_attr_init(&info->sysfs_attr.attr);
+ snprintf(info->name, AUXILIARY_MAX_IRQ_NAME, "%d", irq);
+
+ ret = xa_insert(&auxdev->sysfs.irqs, irq, info, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ info->sysfs_attr.attr.name = info->name;
+ ret = sysfs_add_file_to_group(&dev->kobj, &info->sysfs_attr.attr,
+ auxiliary_irqs_group.name);
+ if (ret)
+ goto sysfs_add_err;
+
+ xa_store(&auxdev->sysfs.irqs, irq, no_free_ptr(info), GFP_KERNEL);
+ return 0;
+
+sysfs_add_err:
+ xa_erase(&auxdev->sysfs.irqs, irq);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_add);
+
+/**
+ * auxiliary_device_sysfs_irq_remove - remove a sysfs entry for the given IRQ
+ * @auxdev: auxiliary bus device to add the sysfs entry.
+ * @irq: the IRQ to remove.
+ *
+ * This function should be called to remove an IRQ sysfs entry.
+ * The driver must invoke this API when IRQ is released by the device.
+ */
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq)
+{
+ struct auxiliary_irq_info *info __free(kfree) = xa_load(&auxdev->sysfs.irqs, irq);
+ struct device *dev = &auxdev->dev;
+
+ if (!info) {
+ dev_err(&auxdev->dev, "IRQ %d doesn't exist\n", irq);
+ return;
+ }
+ sysfs_remove_file_from_group(&dev->kobj, &info->sysfs_attr.attr,
+ auxiliary_irqs_group.name);
+ xa_erase(&auxdev->sysfs.irqs, irq);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_remove);
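
A fragment sketch of the intended pairing (hypothetical driver context;
auxdev, irq and ret are assumed locals). Each added IRQ is visible as
/sys/bus/auxiliary/devices/<name>/irqs/<irq> while it is in use:

	ret = auxiliary_device_sysfs_irq_add(auxdev, irq);	/* after the IRQ is requested */
	if (ret)
		return ret;
	/* ... */
	auxiliary_device_sysfs_irq_remove(auxdev, irq);		/* before the IRQ is released */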
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0738ccad08b2..430cbefbc97f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
kset_put(&sp->subsys);
}
+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
@@ -84,6 +85,18 @@ struct driver_private {
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+#ifdef CONFIG_RUST
+/**
+ * struct driver_type - Representation of a Rust driver type.
+ */
+struct driver_type {
+ /**
+ * @id: Representation of core::any::TypeId.
+ */
+ u8 id[16];
+} __packed;
+#endif
+
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
@@ -99,6 +112,7 @@ struct driver_private {
* @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @driver_type - The type of the bound Rust driver.
* @dead - This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
@@ -112,9 +126,12 @@ struct device_private {
struct klist_node knode_bus;
struct klist_node knode_class;
struct list_head deferred_probe;
- struct device_driver *async_driver;
+ const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
+#ifdef CONFIG_RUST
+ struct driver_type driver_type;
+#endif
u8 dead:1;
};
#define to_device_private_parent(obj) \
@@ -137,6 +154,7 @@ int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
+int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
@@ -145,7 +163,7 @@ void auxiliary_bus_init(void);
static inline void auxiliary_bus_init(void) { }
#endif
-struct kobject *virtual_device_parent(struct device *dev);
+struct kobject *virtual_device_parent(void);
int bus_add_device(struct device *dev);
void bus_probe_device(struct device *dev);
@@ -155,13 +173,13 @@ bool bus_is_registered(const struct bus_type *bus);
int bus_add_driver(struct device_driver *drv);
void bus_remove_driver(struct device_driver *drv);
-void device_release_driver_internal(struct device *dev, struct device_driver *drv,
+void device_release_driver_internal(struct device *dev, const struct device_driver *drv,
struct device *parent);
-void driver_detach(struct device_driver *drv);
+void driver_detach(const struct device_driver *drv);
void driver_deferred_probe_del(struct device *dev);
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
-static inline int driver_match_device(struct device_driver *drv,
+static inline int driver_match_device(const struct device_driver *drv,
struct device *dev)
{
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
@@ -175,10 +193,26 @@ static inline void dev_sync_state(struct device *dev)
dev->driver->sync_state(dev);
}
-int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups);
-void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups);
+int driver_add_groups(const struct device_driver *drv, const struct attribute_group **groups);
+void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+ /*
+	 * The majority of (all?) read accesses to dev->driver happen either
+	 * while holding the device lock or in bus/driver code that is only
+	 * invoked when the device is bound to a driver, so there is no
+	 * concern of the pointer changing while it is being read. However,
+	 * when reading the device's uevent file we read the driver pointer
+	 * without taking the device lock (so we do not block there for an
+	 * arbitrary amount of time). We use WRITE_ONCE() here to prevent
+	 * tearing so that READ_ONCE() can safely be used in the uevent code.
+ */
+ // FIXME - this cast should not be needed "soon"
+ WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);
@@ -192,11 +226,14 @@ extern struct kset *devices_kset;
void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-void module_add_driver(struct module *mod, struct device_driver *drv);
-void module_remove_driver(struct device_driver *drv);
+int module_add_driver(struct module *mod, const struct device_driver *drv);
+void module_remove_driver(const struct device_driver *drv);
#else
-static inline void module_add_driver(struct module *mod,
- struct device_driver *drv) { }
+static inline int module_add_driver(struct module *mod,
+ struct device_driver *drv)
+{
+ return 0;
+}
static inline void module_remove_driver(struct device_driver *drv) { }
#endif
@@ -227,9 +264,18 @@ void device_links_driver_cleanup(struct device *dev);
void device_links_no_driver(struct device *dev);
bool device_links_busy(struct device *dev);
void device_links_unbind_consumers(struct device *dev);
+bool device_link_flag_is_sync_state_only(u32 flags);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
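
A usage sketch for the new iterator macros (driver-core internal). The list
primitive checks device_links_read_lock_held(), so the caller is assumed to
hold the devlink SRCU read lock:

	struct device_link *link;
	int idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		dev_info(dev, "supplier: %s\n", dev_name(link->supplier));

	device_links_read_unlock(idx);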
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index daee55c9b2d9..9eb7771706f0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
@@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for reading a bus attribute without show() */
+ ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
@@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for writing a bus attribute without store() */
+ ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);
@@ -332,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
@@ -352,7 +367,7 @@ static struct device *next_device(struct klist_iter *i)
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -389,7 +404,7 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
*/
struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -400,15 +415,43 @@ struct device *bus_find_device(const struct bus_type *bus,
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(bus_find_device);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!sp)
+ return NULL;
+
+ klist_iter_init_node(&sp->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = prev_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ klist_iter_exit(&i);
+ subsys_put(sp);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
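
For illustration, a sketch that finds the most recently registered match
instead of the first one; device_match_name() is an existing helper with the
device_match_t signature, and "my-dev" is an assumed device name:

	struct device *dev = bus_find_device_reverse(&platform_bus_type, NULL,
						     "my-dev", device_match_name);
	if (dev) {
		/* ... use dev ... */
		put_device(dev);	/* drop the reference taken by the iterator */
	}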
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -528,8 +571,7 @@ void bus_probe_device(struct device *dev)
if (!sp)
return;
- if (sp->drivers_autoprobe)
- device_initial_probe(dev);
+ device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
@@ -674,7 +716,12 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_del_list;
}
- module_add_driver(drv->owner, drv);
+ error = module_add_driver(drv->owner, drv);
+ if (error) {
+ printk(KERN_ERR "%s: failed to create module links for %s\n",
+ __func__, drv->name);
+ goto out_detach;
+ }
error = driver_create_file(drv, &driver_attr_uevent);
if (error) {
@@ -699,6 +746,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_detach:
+ driver_detach(drv);
out_del_list:
klist_del(&priv->knode_bus);
out_unregister:
@@ -913,6 +962,8 @@ bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&priv->subsys);
+	/* The kset_unregister() call above frees @priv */
+ priv = NULL;
out:
kfree(priv);
return retval;
@@ -1277,7 +1328,7 @@ EXPORT_SYMBOL_GPL(subsys_system_register);
* @groups: default attributes for the root device
*
* All 'virtual' subsystems have a /sys/devices/system/<name> root device
- * with the name of the subystem. The root device can carry subsystem-wide
+ * with the name of the subsystem. The root device can carry subsystem-wide
* attributes. All registered devices are below this single root device.
* There's no restriction on device naming. This is for kernel software
* constructs which need sysfs interface.
@@ -1287,7 +1338,7 @@ int subsys_virtual_register(const struct bus_type *subsys,
{
struct kobject *virtual_dir;
- virtual_dir = virtual_device_parent(NULL);
+ virtual_dir = virtual_device_parent();
if (!virtual_dir)
return -ENOMEM;
@@ -1378,8 +1429,13 @@ int __init buses_init(void)
return -ENOMEM;
system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
+ if (!system_kset) {
+ /* Do error handling here as devices_init() does */
+ kset_unregister(bus_kset);
+ bus_kset = NULL;
+ pr_err("%s: failed to create and add kset 'bus'\n", __func__);
return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 23b8cba4a2a3..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -58,7 +59,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
struct cacheinfo *llc;
- if (!cache_leaves(cpu))
+ if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
return false;
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
return of_property_read_bool(np, "cache-unified");
}
+static bool match_cache_node(struct device_node *cpu,
+ const struct device_node *cache_node)
+{
+ struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+ while (cache) {
+ if (cache == cache_node) {
+ of_node_put(cache);
+ return true;
+ }
+
+ prev = cache;
+ cache = of_find_next_cache_node(cache);
+ of_node_put(prev);
+ }
+
+ return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x) (_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+ struct device_node *cache_node)
+{
+ struct device_node *cpu;
+ u32 min_id = ~0;
+
+ for_each_of_cpu_node(cpu) {
+ u64 id = of_get_cpu_hwid(cpu, 0);
+
+ id = arch_compact_of_hwid(id);
+ if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+ of_node_put(cpu);
+ return;
+ }
+
+ if (match_cache_node(cpu, cache_node))
+ min_id = min(min_id, id);
+ }
+
+ if (min_id != ~0) {
+ this_leaf->id = min_id;
+ this_leaf->attributes |= CACHE_ID;
+ }
+}
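The FIELD_GET(GENMASK_ULL(63, 32), id) test above rejects hardware IDs that
do not fit the 32-bit cacheinfo id field. A standalone illustration of the
same bitfield check (hypothetical helper, not part of the patch):

	#include <linux/bitfield.h>
	#include <linux/types.h>

	static bool hwid_fits_cache_id(u64 hwid)
	{
		/* true only when bits 63:32 are all zero */
		return FIELD_GET(GENMASK_ULL(63, 32), hwid) == 0;
	}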
+
static void cache_of_set_props(struct cacheinfo *this_leaf,
struct device_node *np)
{
@@ -198,33 +247,29 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
cache_get_line_size(this_leaf, np);
cache_nr_sets(this_leaf, np);
cache_associativity(this_leaf);
+ cache_of_set_id(this_leaf, np);
}
static int cache_setup_of_node(unsigned int cpu)
{
- struct device_node *np, *prev;
struct cacheinfo *this_leaf;
unsigned int index = 0;
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
- prev = np;
-
while (index < cache_leaves(cpu)) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1) {
+ struct device_node *prev __free(device_node) = np;
np = of_find_next_cache_node(np);
- of_node_put(prev);
- prev = np;
if (!np)
break;
}
@@ -233,8 +278,6 @@ static int cache_setup_of_node(unsigned int cpu)
index++;
}
- of_node_put(np);
-
if (index != cache_leaves(cpu)) /* not all OF nodes populated */
return -ENOENT;
@@ -243,17 +286,14 @@ static int cache_setup_of_node(unsigned int cpu)
static bool of_check_cache_nodes(struct device_node *np)
{
- struct device_node *next;
-
if (of_property_present(np, "cache-size") ||
of_property_present(np, "i-cache-size") ||
of_property_present(np, "d-cache-size") ||
of_property_present(np, "cache-unified"))
return true;
- next = of_find_next_cache_node(np);
+ struct device_node *next __free(device_node) = of_find_next_cache_node(np);
if (next) {
- of_node_put(next);
return true;
}
@@ -264,11 +304,11 @@ static int of_count_cache_leaves(struct device_node *np)
{
unsigned int leaves = 0;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
++leaves;
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
++leaves;
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
++leaves;
if (!leaves) {
@@ -287,12 +327,10 @@ static int of_count_cache_leaves(struct device_node *np)
int init_of_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct device_node *np = of_cpu_device_node_get(cpu);
- struct device_node *prev = NULL;
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
unsigned int levels = 0, leaves, level;
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
@@ -300,30 +338,27 @@ int init_of_cache_level(unsigned int cpu)
if (leaves > 0)
levels = 1;
- prev = np;
- while ((np = of_find_next_cache_node(np))) {
- of_node_put(prev);
- prev = np;
+ while (1) {
+ struct device_node *prev __free(device_node) = np;
+ np = of_find_next_cache_node(np);
+ if (!np)
+ break;
+
if (!of_device_is_compatible(np, "cache"))
- goto err_out;
+ return -EINVAL;
if (of_property_read_u32(np, "cache-level", &level))
- goto err_out;
+ return -EINVAL;
if (level <= levels)
- goto err_out;
+ return -EINVAL;
leaves += of_count_cache_leaves(np);
levels = level;
}
- of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
return 0;
-
-err_out:
- of_node_put(np);
- return -EINVAL;
}
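The conversions above lean on the scope-based cleanup support from
<linux/cleanup.h>: a pointer declared with __free(device_node) gets
of_node_put() called automatically when it leaves scope, so every early
return is leak-free without goto labels. A condensed sketch of the idiom
under the same assumptions (the walk itself is hypothetical):

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static int count_cache_nodes(struct device_node *start)
	{
		struct device_node *np __free(device_node) = of_node_get(start);
		int count = 0;

		while (np) {
			struct device_node *prev __free(device_node) = np;

			count++;
			/* prev is put automatically at the end of this block */
			np = of_find_next_cache_node(prev);
		}
		return count;
	}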
#else
@@ -382,9 +417,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
- struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
-
- if (i == cpu || !sib_cpu_ci->info_list)
+ if (i == cpu || !per_cpu_cacheinfo(i))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
@@ -424,10 +457,7 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
- struct cpu_cacheinfo *sib_cpu_ci =
- get_cpu_cacheinfo(sibling);
-
- if (sibling == cpu || !sib_cpu_ci->info_list)
+ if (sibling == cpu || !per_cpu_cacheinfo(sibling))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
@@ -478,11 +508,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
{
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
if (!per_cpu_cacheinfo(cpu)) {
cache_leaves(cpu) = 0;
return -ENOMEM;
@@ -554,7 +582,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
*/
ci_cacheinfo(cpu)->early_ci_levels = false;
- if (cache_leaves(cpu) <= early_leaves)
+ /*
+ * Some architectures (e.g., x86) do not use early initialization.
+ * Allocate memory now in that case.
+ */
+ if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
return 0;
kfree(per_cpu_cacheinfo(cpu));
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7b38fdf8e1d7..2526c57d924e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -183,6 +183,17 @@ int class_register(const struct class *cls)
pr_debug("device class '%s': registering\n", cls->name);
+ if (cls->ns_type && !cls->namespace) {
+ pr_err("%s: class '%s' does not have namespace\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+ if (!cls->ns_type && cls->namespace) {
+ pr_err("%s: class '%s' does not have ns_type\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -312,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
- if (!sp)
+ memset(iter, 0, sizeof(*iter));
+ if (!sp) {
+ pr_crit("%s: class %p was not registered yet\n",
+ __func__, class);
return;
+ }
if (start)
start_knode = &start->p->knode_class;
@@ -340,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
struct klist_node *knode;
struct device *dev;
+ if (!iter->sp)
+ return NULL;
+
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
@@ -384,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -394,7 +412,7 @@ int class_for_each_device(const struct class *class, const struct device *start,
if (!class)
return -EINVAL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return -EINVAL;
}
@@ -433,8 +451,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data,
- int (*match)(struct device *, const void *))
+ const void *data, device_match_t match)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -443,7 +460,7 @@ struct device *class_find_device(const struct class *class, const struct device
if (!class)
return NULL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return NULL;
}
@@ -584,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
*/
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
+ return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
@@ -616,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
*/
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
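A caller-side sketch of the slimmed-down compatibility API; any extra
"device" symlink is now the caller's responsibility (class variable is
hypothetical):

	#include <linux/device/class.h>

	static struct class_compat *my_compat_class;

	static int expose_compat_link(struct device *dev)
	{
		return class_compat_create_link(my_compat_class, dev);
	}

	static void drop_compat_link(struct device *dev)
	{
		class_compat_remove_link(my_compat_class, dev);
	}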
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 741497324d78..024ad9471b8a 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -87,17 +87,17 @@ static int component_devices_show(struct seq_file *s, void *data)
size_t i;
mutex_lock(&component_mutex);
- seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status");
- seq_puts(s, "-------------------------------------------------------------\n");
- seq_printf(s, "%-40s %20s\n\n",
+ seq_printf(s, "%-50s %20s\n", "aggregate_device name", "status");
+ seq_puts(s, "-----------------------------------------------------------------------\n");
+ seq_printf(s, "%-50s %20s\n\n",
dev_name(m->parent), m->bound ? "bound" : "not bound");
- seq_printf(s, "%-40s %20s\n", "device name", "status");
- seq_puts(s, "-------------------------------------------------------------\n");
+ seq_printf(s, "%-50s %20s\n", "device name", "status");
+ seq_puts(s, "-----------------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
struct component *component = match->compare[i].component;
- seq_printf(s, "%-40s %20s\n",
+ seq_printf(s, "%-50s %20s\n",
component ? dev_name(component->dev) : "(unknown)",
component ? (component->bound ? "bound" : "not bound") : "not registered");
}
@@ -569,10 +569,28 @@ void component_master_del(struct device *parent,
}
EXPORT_SYMBOL_GPL(component_master_del);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops)
+{
+ struct aggregate_device *adev;
+
+ guard(mutex)(&component_mutex);
+ adev = __aggregate_find(parent, ops);
+ if (!adev)
+ return false;
+
+ return adev->bound;
+}
+EXPORT_SYMBOL_GPL(component_master_is_bound);
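A minimal sketch of how an aggregate driver might use the new query,
assuming the ops were registered earlier via
component_master_add_with_match() (names are hypothetical):

	#include <linux/component.h>

	static const struct component_master_ops my_master_ops = {
		/* .bind / .unbind elided */
	};

	static bool my_aggregate_ready(struct device *parent)
	{
		return component_master_is_bound(parent, &my_master_ops);
	}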
+
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
- WARN_ON(!component->bound);
+ if (WARN_ON(!component->bound))
+ return;
+
+ dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
+ dev_name(component->dev), component, component->ops);
if (component->ops && component->ops->unbind)
component->ops->unbind(component->dev, adev->parent, data);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5f4e03336e68..40de2f51a1b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,28 +9,29 @@
*/
#include <linux/acpi.h>
+#include <linux/blkdev.h>
+#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kdev_t.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-#include <linux/netdevice.h>
-#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
-#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "physical_location.h"
@@ -96,12 +97,9 @@ static int __fwnode_link_add(struct fwnode_handle *con,
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
u8 flags)
{
- int ret;
+ guard(mutex)(&fwnode_link_lock);
- mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, flags);
- mutex_unlock(&fwnode_link_lock);
- return ret;
+ return __fwnode_link_add(con, sup, flags);
}
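guard(mutex) comes from <linux/cleanup.h> and drops the lock automatically
when the enclosing scope exits, which is what lets the function above return
straight through __fwnode_link_add(). A generic sketch of the pattern
(hypothetical lock and counter):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(stats_lock);
	static unsigned long stats_count;

	static unsigned long stats_inc(void)
	{
		guard(mutex)(&stats_lock);

		/* the mutex is released on every return path */
		return ++stats_count;
	}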
/**
@@ -142,10 +140,10 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -158,10 +156,10 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -289,7 +287,7 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
-static inline bool device_link_flag_is_sync_state_only(u32 flags)
+bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
@@ -462,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev,
struct device_link *link = to_devlink(dev);
const char *output;
- if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
output = "supplier unbind";
- else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
output = "consumer unbind";
else
output = "never";
@@ -478,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
@@ -487,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n",
- !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
@@ -554,7 +551,7 @@ void device_link_wait_removal(void)
}
EXPORT_SYMBOL_GPL(device_link_wait_removal);
-static struct class devlink_class = {
+static const struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
@@ -562,20 +559,11 @@ static struct class devlink_class = {
static int devlink_add_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
int ret;
- size_t len;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
-
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
if (ret)
@@ -585,58 +573,64 @@ static int devlink_add_symlinks(struct device *dev)
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con) {
+ ret = -ENOMEM;
+ goto err_con_dev;
+ }
+
+ ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup) {
+ ret = -ENOMEM;
+ goto err_sup_dev;
+ }
+
+ ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
+ sysfs_remove_link(&sup->kobj, buf_con);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
- kfree(buf);
return ret;
}
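The kasprintf()/__free(kfree) pairing above removes the manual length
arithmetic and frees the buffer on every exit path. A condensed sketch of
the idiom (link-name scheme borrowed from the code above, helper name
hypothetical):

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/sysfs.h>

	static int add_named_link(struct kobject *from, struct kobject *to,
				  const char *bus, const char *name)
	{
		char *buf __free(kfree) =
			kasprintf(GFP_KERNEL, "consumer:%s:%s", bus, name);

		if (!buf)
			return -ENOMEM;

		/* buf is kfree()d automatically, success or failure */
		return sysfs_create_link(from, to, buf);
	}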
static void devlink_remove_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
struct device_link *link = to_devlink(dev);
- size_t len;
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf) {
- WARN(1, "Unable to properly free device link symlinks!\n");
- return;
- }
-
if (device_is_registered(con)) {
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- sysfs_remove_link(&con->kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup)
+ goto out;
+ sysfs_remove_link(&con->kobj, buf_sup);
}
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
- kfree(buf);
+
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con)
+ goto out;
+ sysfs_remove_link(&sup->kobj, buf_con);
+
+ return;
+
+out:
+ WARN(1, "Unable to properly free device link symlinks!\n");
}
static struct class_interface devlink_class_intf = {
@@ -677,6 +671,9 @@ postcore_initcall(devlink_class_init);
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
+ * Return: On success, a device_link struct will be returned.
+ * On error or invalid flag settings, NULL will be returned.
+ *
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
@@ -794,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
- if (link->flags & DL_FLAG_INFERRED &&
+ if (device_link_test(link, DL_FLAG_INFERRED) &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
- if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
link->flags |= DL_FLAG_PM_RUNTIME;
}
@@ -809,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer,
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
- !(link->flags & DL_FLAG_STATELESS)) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
+ !device_link_test(link, DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
@@ -825,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
@@ -833,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
- if (!(link->flags & DL_FLAG_MANAGED)) {
+ if (!device_link_test(link, DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
@@ -942,7 +939,7 @@ static void __device_link_del(struct kref *kref)
static void device_link_put_kref(struct device_link *link)
{
- if (link->flags & DL_FLAG_STATELESS)
+ if (device_link_test(link, DL_FLAG_STATELESS))
kref_put(&link->kref, __device_link_del);
else if (!device_is_registered(link->consumer))
__device_link_del(&link->kref);
@@ -1006,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1060,42 +1057,36 @@ int device_links_check_suppliers(struct device *dev)
* Device waiting for supplier to become available is not allowed to
* probe.
*/
- mutex_lock(&fwnode_link_lock);
- sup_fw = fwnode_links_check_suppliers(dev->fwnode);
- if (sup_fw) {
- if (!dev_is_best_effort(dev)) {
- fwnode_ret = -EPROBE_DEFER;
- dev_err_probe(dev, -EPROBE_DEFER,
- "wait for supplier %pfwf\n", sup_fw);
- } else {
- fwnode_ret = -EAGAIN;
+ scoped_guard(mutex, &fwnode_link_lock) {
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
+ if (dev_is_best_effort(dev))
+ fwnode_ret = -EAGAIN;
+ else
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwf\n", sup_fw);
}
}
- mutex_unlock(&fwnode_link_lock);
- if (fwnode_ret == -EPROBE_DEFER)
- return fwnode_ret;
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE &&
- !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
!link->supplier->can_match) {
ret = -EAGAIN;
continue;
}
device_links_missing_supplier(dev);
- dev_err_probe(dev, -EPROBE_DEFER,
- "supplier %s not ready\n",
- dev_name(link->supplier));
- ret = -EPROBE_DEFER;
+ ret = dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n", dev_name(link->supplier));
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
@@ -1136,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev,
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
@@ -1248,9 +1239,8 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
bool val;
device_lock(dev);
- mutex_lock(&fwnode_link_lock);
- val = !!fwnode_links_check_suppliers(dev->fwnode);
- mutex_unlock(&fwnode_link_lock);
+ scoped_guard(mutex, &fwnode_link_lock)
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
@@ -1277,7 +1267,7 @@ void device_links_force_bind(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -1323,20 +1313,22 @@ void device_links_driver_bound(struct device *dev)
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
+
fwnode_links_purge_suppliers(dev->fwnode);
- mutex_lock(&fwnode_link_lock);
+
+ guard(mutex)(&fwnode_link_lock);
+
fwnode_for_each_available_child_node(dev->fwnode, child)
__fw_devlink_pickup_dangling_consumers(child,
dev->fwnode);
__fw_devlink_link_to_consumers(dev);
- mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1352,7 +1344,7 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
- if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+ if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
driver_deferred_probe_add(link->consumer);
}
@@ -1364,11 +1356,11 @@ void device_links_driver_bound(struct device *dev)
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
struct device *supplier;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
supplier = link->supplier;
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
/*
* When DL_FLAG_SYNC_STATE_ONLY is set, it means no
* other DL_MANAGED_LINK_FLAGS have been set. So, it's
@@ -1376,7 +1368,7 @@ void device_links_driver_bound(struct device *dev)
*/
device_link_drop_managed(link);
} else if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
link->status != DL_STATE_CONSUMER_PROBE &&
!link->supplier->can_match) {
/*
@@ -1428,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev)
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
device_link_drop_managed(link);
continue;
}
@@ -1443,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1468,7 +1460,7 @@ void device_links_no_driver(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1505,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+ WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
/*
@@ -1517,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev)
* has moved to DL_STATE_SUPPLIER_UNBIND.
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
- link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
@@ -1551,7 +1543,7 @@ bool device_links_busy(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
@@ -1593,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
+ device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
continue;
status = link->status;
@@ -1750,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
static void fw_devlink_relax_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_INFERRED))
+ if (!device_link_test(link, DL_FLAG_INFERRED))
return;
if (device_link_flag_is_sync_state_only(link->flags))
@@ -1786,13 +1778,13 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
- if (!(link->flags & DL_FLAG_MANAGED) ||
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
link->status == DL_STATE_ACTIVE || sup->state_synced ||
!dev_has_sync_state(sup))
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
@@ -1888,8 +1880,6 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
-#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
-
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
struct device *dev;
@@ -1978,7 +1968,7 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
- * @con: Potential consumer device.
+ * @con_handle: Potential consumer device fwnode.
* @sup_handle: Potential supplier's fwnode.
*
* Needs to be called with fwnode_lock and device link lock held.
@@ -1996,10 +1986,10 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
*
* Return true if one or more cycles were found. Otherwise, return false.
*/
-static bool __fw_devlink_relax_cycles(struct device *con,
+static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
struct fwnode_handle *sup_handle)
{
- struct device *sup_dev = NULL, *par_dev = NULL;
+ struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
struct fwnode_link *link;
struct device_link *dev_link;
bool ret = false;
@@ -2016,22 +2006,22 @@ static bool __fw_devlink_relax_cycles(struct device *con,
sup_handle->flags |= FWNODE_FLAG_VISITED;
- sup_dev = get_dev_from_fwnode(sup_handle);
-
/* Termination condition. */
- if (sup_dev == con) {
+ if (sup_handle == con_handle) {
pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
+ sup_dev = get_dev_from_fwnode(sup_handle);
+ con_dev = get_dev_from_fwnode(con_handle);
/*
* If sup_dev is bound to a driver and @con hasn't started binding to a
* driver, sup_dev can't be a consumer of @con. So, no need to check
* further.
*/
if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
- con->links.status == DL_DEV_NO_DRIVER) {
+ con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
ret = false;
goto out;
}
@@ -2040,7 +2030,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (link->flags & FWLINK_FLAG_IGNORE)
continue;
- if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
}
@@ -2055,7 +2045,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
par_dev->fwnode);
ret = true;
@@ -2070,10 +2060,10 @@ static bool __fw_devlink_relax_cycles(struct device *con,
* such due to a cycle.
*/
if (device_link_flag_is_sync_state_only(dev_link->flags) &&
- !(dev_link->flags & DL_FLAG_CYCLE))
+ !device_link_test(dev_link, DL_FLAG_CYCLE))
continue;
- if (__fw_devlink_relax_cycles(con,
+ if (__fw_devlink_relax_cycles(con_handle,
dev_link->supplier->fwnode)) {
pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
dev_link->supplier->fwnode);
@@ -2086,6 +2076,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
@@ -2121,11 +2112,6 @@ static int fw_devlink_create_devlink(struct device *con,
if (link->flags & FWLINK_FLAG_IGNORE)
return 0;
- if (con->fwnode == link->consumer)
- flags = fw_devlink_get_flags(link->flags);
- else
- flags = FW_DEVLINK_FLAGS_PERMISSIVE;
-
/*
* In some cases, a device P might also be a supplier to its child node
* C. However, this would defer the probe of C until the probe of P
@@ -2146,25 +2132,23 @@ static int fw_devlink_create_devlink(struct device *con,
return -EINVAL;
/*
- * SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So, one might expect that cycle detection isn't necessary for them.
- * However, if the device link was marked as SYNC_STATE_ONLY because
- * it's part of a cycle, then we still need to do cycle detection. This
- * is because the consumer and supplier might be part of multiple cycles
- * and we need to detect all those cycles.
+ * Don't try to optimize by not calling the cycle detection logic under
+ * certain conditions. There's always some corner case that won't get
+ * detected.
*/
- if (!device_link_flag_is_sync_state_only(flags) ||
- flags & DL_FLAG_CYCLE) {
- device_links_write_lock();
- if (__fw_devlink_relax_cycles(con, sup_handle)) {
- __fwnode_link_cycle(link);
- flags = fw_devlink_get_flags(link->flags);
- pr_debug("----- cycle: end -----\n");
- dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
- sup_handle);
- }
- device_links_write_unlock();
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
+ __fwnode_link_cycle(link);
+ pr_debug("----- cycle: end -----\n");
+ pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
+ link->consumer, sup_handle);
}
+ device_links_write_unlock();
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
sup_dev = fwnode_get_next_parent_dev(sup_handle);
@@ -2187,8 +2171,8 @@ static int fw_devlink_create_devlink(struct device *con,
}
if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
- dev_err(con, "Failed to create device link (0x%x) with %s\n",
- flags, dev_name(sup_dev));
+ dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n",
+ flags, dev_name(sup_dev), link->consumer);
ret = -EINVAL;
}
@@ -2338,16 +2322,14 @@ static void fw_devlink_link_device(struct device *dev)
fw_devlink_parse_fwtree(fwnode);
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
__fw_devlink_link_to_consumers(dev);
__fw_devlink_link_to_suppliers(dev, fwnode);
- mutex_unlock(&fwnode_link_lock);
}
/* Device links support end. */
-int (*platform_notify)(struct device *dev) = NULL;
-int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
/* /sys/dev/char */
@@ -2395,16 +2377,10 @@ static void device_platform_notify(struct device *dev)
acpi_device_notify(dev);
software_node_notify(dev);
-
- if (platform_notify)
- platform_notify(dev);
}
static void device_platform_notify_remove(struct device *dev)
{
- if (platform_notify_remove)
- platform_notify_remove(dev);
-
software_node_notify_remove(dev);
acpi_device_notify_remove(dev);
@@ -2546,6 +2522,15 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(device_show_bool);
+ssize_t device_show_string(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)ea->var);
+}
+EXPORT_SYMBOL_GPL(device_show_string);
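device_show_string() backs read-only string attributes built on struct
dev_ext_attribute. A hand-wired sketch (the attribute and value are
hypothetical; a DEVICE_STRING_ATTR_RO() convenience macro, if present in the
tree, wraps the same initializer):

	static struct dev_ext_attribute dev_attr_fw_version = {
		.attr	= __ATTR(fw_version, 0444, device_show_string, NULL),
		.var	= (void *)"1.2.3",	/* hypothetical value */
	};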
+
/**
* device_release - free device structure.
* @kobj: device's kobject.
@@ -2589,7 +2574,7 @@ static const void *device_namespace(const struct kobject *kobj)
const struct device *dev = kobj_to_dev(kobj);
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -2636,6 +2621,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
+/*
+ * Try filling "DRIVER=<name>" uevent variable for a device. Because this
+ * function may race with binding and unbinding the device from a driver,
+ * we need to be careful. Binding is generally safe, at worst we miss the
+ * fact that the device is already bound to a driver (but the driver
+ * information that is delivered through uevents is best-effort, it may
+ * become obsolete as soon as it is generated anyways). Unbinding is more
+ * risky as driver pointer is transitioning to NULL, so READ_ONCE() should
+ * be used to make sure we are dealing with the same pointer, and to
+ * ensure that driver structure is not going to disappear from under us
+ * we take bus' drivers klist lock. The assumption is that only a
+ * registered driver can be bound to a device, and that bus code will
+ * take the same lock when unregistering a driver.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (sp) {
+ scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
+ struct device_driver *drv = READ_ONCE(dev->driver);
+ if (drv)
+ add_uevent_var(env, "DRIVER=%s", drv->name);
+ }
+
+ subsys_put(sp);
+ }
+}
+
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
@@ -2667,8 +2681,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- if (dev->driver)
- add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+ /* Add "DRIVER=%s" variable if the device is bound to a driver */
+ dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2844,15 +2858,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
sysfs_remove_group(&dev->kobj, group);
}
-static void devm_attr_groups_remove(struct device *dev, void *res)
-{
- union device_attr_group_devres *devres = res;
- const struct attribute_group **groups = devres->groups;
-
- dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
- sysfs_remove_groups(&dev->kobj, groups);
-}
-
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
@@ -2885,42 +2890,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
-/**
- * devm_device_add_groups - create a bunch of managed attribute groups
- * @dev: The device to create the group for
- * @groups: The attribute groups to create, NULL terminated
- *
- * This function creates a bunch of managed attribute groups. If an error
- * occurs when creating a group, all previously created groups will be
- * removed, unwinding everything back to the original state when this
- * function was called. It will explicitly warn and error if any of the
- * attribute files being created already exist.
- *
- * Returns 0 on success or error code from sysfs_create_group on failure.
- */
-int devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- union device_attr_group_devres *devres;
- int error;
-
- devres = devres_alloc(devm_attr_groups_remove,
- sizeof(*devres), GFP_KERNEL);
- if (!devres)
- return -ENOMEM;
-
- error = sysfs_create_groups(&dev->kobj, groups);
- if (error) {
- devres_free(devres);
- return error;
- }
-
- devres->groups = groups;
- devres_add(dev, devres);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_device_add_groups);
-
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
@@ -3208,7 +3177,7 @@ void device_initialize(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_initialize);
-struct kobject *virtual_device_parent(struct device *dev)
+struct kobject *virtual_device_parent(void)
{
static struct kobject *virtual_dir = NULL;
@@ -3286,7 +3255,7 @@ static struct kobject *get_device_parent(struct device *dev,
* in a "glue" directory to prevent namespace collisions.
*/
if (parent == NULL)
- parent_kobj = virtual_device_parent(dev);
+ parent_kobj = virtual_device_parent();
else if (parent->class && !dev->class->ns_type) {
subsys_put(sp);
return &parent->kobj;
@@ -3754,7 +3723,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
@@ -4025,8 +3994,8 @@ const char *device_get_devnode(const struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4035,13 +4004,13 @@ const char *device_get_devnode(const struct device *dev,
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4055,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4065,13 +4034,13 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4083,87 +4052,77 @@ int device_for_each_child_reverse(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
- * device_find_child - device iterator for locating a particular device.
- * @parent: parent struct device
- * @match: Callback function to check device
- * @data: Data to pass to match function
- *
- * This is similar to the device_for_each_child() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
+ * device_for_each_child_reverse_from - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @from: optional starting point in child list
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
*
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero and a reference to the
- * current device can be obtained, this function will return to the caller
- * and not iterate over any more devices.
+ * Iterate over @parent's child devices, starting at @from, and call @fn
+ * for each, passing it @data. This helper is identical to
+ * device_for_each_child_reverse() when @from is NULL.
*
- * NOTE: you will need to drop the reference with put_device() after use.
+ * @fn is checked each iteration. If it returns anything other than 0,
+ * iteration stops and that value is returned to the caller of
+ * device_for_each_child_reverse_from().
*/
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
+ int error = 0;
- if (!parent)
- return NULL;
+ if (!parent || !parent->p)
+ return 0;
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
- break;
+ klist_iter_init_node(&parent->p->klist_children, &i,
+ (from ? &from->p->knode_parent : NULL));
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
klist_iter_exit(&i);
- return child;
+ return error;
}
-EXPORT_SYMBOL_GPL(device_find_child);
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
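A teardown-style sketch of the new iterator, resuming from the last child
that was set up (callback and wrapper are hypothetical):

	static int unwind_one(struct device *child, void *data)
	{
		/* ... undo per-child setup ... */
		return 0;
	}

	static void unwind_from(struct device *parent, struct device *last)
	{
		/* walks children in reverse, starting at @last */
		device_for_each_child_reverse_from(parent, last, NULL,
						   unwind_one);
	}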
/**
- * device_find_child_by_name - device iterator for locating a child device.
+ * device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @name: name of the child device
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
*
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a device that has the name @name.
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
-struct device *device_find_child_by_name(struct device *parent,
- const char *name)
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match)
{
struct klist_iter i;
struct device *child;
- if (!parent)
+ if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (sysfs_streq(dev_name(child), name) && get_device(child))
+ while ((child = next_device(&i))) {
+ if (match(child, data)) {
+ get_device(child);
break;
+ }
+ }
klist_iter_exit(&i);
return child;
}
-EXPORT_SYMBOL_GPL(device_find_child_by_name);
-
-static int match_any(struct device *dev, void *unused)
-{
- return 1;
-}
-
-/**
- * device_find_any_child - device iterator for locating a child device, if any.
- * @parent: parent struct device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a child device, if any.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_any_child(struct device *parent)
-{
- return device_find_child(parent, NULL, match_any);
-}
-EXPORT_SYMBOL_GPL(device_find_any_child);
+EXPORT_SYMBOL_GPL(device_find_child);
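With the const-data prototype, the two removed helpers reduce to stock
matchers. A sketch, assuming the generic device_match_name() and
device_match_any() helpers:

	#include <linux/device.h>

	static struct device *find_child_named(struct device *parent,
					       const char *name)
	{
		return device_find_child(parent, name, device_match_name);
	}

	static struct device *find_any_child(struct device *parent)
	{
		return device_find_child(parent, NULL, device_match_any);
	}

Both still return a referenced device; drop it with put_device() after use.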
int __init devices_init(void)
{
@@ -4179,7 +4138,7 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
- device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ device_link_wq = alloc_workqueue("device_link_wq", WQ_PERCPU, 0);
if (!device_link_wq)
goto wq_err;
@@ -4553,9 +4512,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct subsys_private *sp = NULL;
struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
+ bool is_link_renamed = false;
dev = get_device(dev);
if (!dev)
@@ -4570,7 +4531,7 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- struct subsys_private *sp = class_to_subsys(dev->class);
+ sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
@@ -4579,16 +4540,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
- subsys_put(sp);
if (error)
goto out;
+
+ is_link_renamed = true;
}
error = kobject_rename(kobj, new_name);
- if (error)
- goto out;
-
out:
+ if (error && is_link_renamed)
+ sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
+ old_device_name, kobject_namespace(kobj));
+ subsys_put(sp);
+
put_device(dev);
kfree(old_device_name);
@@ -4910,7 +4874,7 @@ set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
else
return;
- strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
+ strscpy(dev_info->subsystem, subsys);
/*
* Add device identifier DEVICE=:
@@ -5020,6 +4984,49 @@ define_dev_printk_level(_dev_info, KERN_INFO);
#endif
+static void __dev_probe_failed(const struct device *dev, int err, bool fatal,
+ const char *fmt, va_list vargsp)
+{
+ struct va_format vaf;
+ va_list vargs;
+
+ /*
+ * On x86_64 and possibly on other architectures, va_list is actually a
+ * size-1 array containing a structure. As a result, function parameter
+ * vargsp decays from T[1] to T*, and &vargsp has type T** rather than
+ * T(*)[1], which is expected by its assignment to vaf.va below.
+ *
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then using a pointer to that local copy
+ * instead, which is the approach employed here.
+ */
+ va_copy(vargs, vargsp);
+
+ vaf.fmt = fmt;
+ vaf.va = &vargs;
+
+ switch (err) {
+ case -EPROBE_DEFER:
+ device_set_deferred_probe_reason(dev, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+
+ case -ENOMEM:
+ /* Don't print anything on -ENOMEM, there's already enough output */
+ break;
+
+ default:
+ /* Log fatal final failures as errors, otherwise produce warnings */
+ if (fatal)
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ else
+ dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+ }
+
+ va_end(vargs);
+}
+
/**
* dev_err_probe - probe error check and log helper
* @dev: the pointer to the struct device
@@ -5032,7 +5039,7 @@ define_dev_printk_level(_dev_info, KERN_INFO);
* -EPROBE_DEFER and propagate error upwards.
* In case of -EPROBE_DEFER it sets also defer probe reason, which can be
* checked later by reading devices_deferred debugfs attribute.
- * It replaces code sequence::
+ * It replaces the following code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
@@ -5044,37 +5051,78 @@ define_dev_printk_level(_dev_info, KERN_INFO);
*
* return dev_err_probe(dev, err, ...);
*
- * Using this helper in your probe function is totally fine even if @err is
- * known to never be -EPROBE_DEFER.
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
- * of the error code, it being emitted symbolically (i.e. you get "EAGAIN"
- * instead of "-35") and the fact that the error code is returned which allows
- * more compact error paths.
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
*
* Returns @err.
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
- struct va_format vaf;
- va_list args;
+ va_list vargs;
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
+ va_start(vargs, fmt);
- if (err != -EPROBE_DEFER) {
- dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- } else {
- device_set_deferred_probe_reason(dev, &vaf);
- dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- }
+ /* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, true, fmt, vargs);
- va_end(args);
+ va_end(vargs);
return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
+/**
+ * dev_warn_probe - probe error check and log helper
+ * @dev: the pointer to the struct device
+ * @err: error value to test
+ * @fmt: printf-style format string
+ * @...: arguments as specified in the format string
+ *
+ * This helper implements common pattern present in probe functions for error
+ * checking: print debug or warning message depending if the error value is
+ * -EPROBE_DEFER and propagate error upwards.
+ * In case of -EPROBE_DEFER it sets also defer probe reason, which can be
+ * checked later by reading devices_deferred debugfs attribute.
+ * It replaces the following code sequence::
+ *
+ * if (err != -EPROBE_DEFER)
+ * dev_warn(dev, ...);
+ * else
+ * dev_dbg(dev, ...);
+ * return err;
+ *
+ * with::
+ *
+ * return dev_warn_probe(dev, err, ...);
+ *
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
+ * The benefit compared to a normal dev_warn() is the standardized format
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
+ *
+ * Returns @err.
+ */
+int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...)
+{
+ va_list vargs;
+
+ va_start(vargs, fmt);
+
+ /* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, false, fmt, vargs);
+
+ va_end(vargs);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_warn_probe);
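A typical probe-path sketch using the helper pair (clock name is
hypothetical):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, "core");

		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get core clock\n");
		return 0;
	}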
+
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
return fwnode && !IS_ERR(fwnode->secondary);
@@ -5147,6 +5195,67 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
+ * device_remove_of_node - Remove an of_node from a device
+ * @dev: device whose device tree node is being removed
+ */
+void device_remove_of_node(struct device *dev)
+{
+ dev = get_device(dev);
+ if (!dev)
+ return;
+
+ if (!dev->of_node)
+ goto end;
+
+ if (dev->fwnode == of_fwnode_handle(dev->of_node))
+ dev->fwnode = NULL;
+
+ of_node_put(dev->of_node);
+ dev->of_node = NULL;
+
+end:
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(device_remove_of_node);
+
+/**
+ * device_add_of_node - Add an of_node to an existing device
+ * @dev: device whose device tree node is being added
+ * @of_node: of_node to add
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int device_add_of_node(struct device *dev, struct device_node *of_node)
+{
+ int ret;
+
+ if (!of_node)
+ return -EINVAL;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ if (dev->of_node) {
+ dev_err(dev, "Cannot replace node %pOF with %pOF\n",
+ dev->of_node, of_node);
+ ret = -EBUSY;
+ goto end;
+ }
+
+ dev->of_node = of_node_get(of_node);
+
+ if (!dev->fwnode)
+ dev->fwnode = of_fwnode_handle(of_node);
+
+ ret = 0;
+end:
+ put_device(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_add_of_node);
+
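A hedged sketch of how these two helpers are meant to pair up; the caller, the "widget" node name, and the lookup below are hypothetical. Note that device_add_of_node() takes its own reference on the node, so the caller may drop its lookup reference.

#include <linux/device.h>
#include <linux/of.h>

static int foo_attach_node(struct device *dev, struct device_node *parent)
{
	struct device_node *np;
	int ret;

	np = of_get_child_by_name(parent, "widget");	/* hypothetical name */
	if (!np)
		return -ENODEV;

	ret = device_add_of_node(dev, np);	/* takes its own reference */
	of_node_put(np);			/* drop the lookup reference */
	return ret;
}

static void foo_detach_node(struct device *dev)
{
	device_remove_of_node(dev);	/* drops the reference, clears fwnode */
}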
+/**
* device_set_of_node_from_dev - reuse device-tree node of another device
* @dev: device whose device-tree node is being set
* @dev2: device whose device-tree node is being reused
@@ -5169,21 +5278,52 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
+/**
+ * get_dev_from_fwnode - Obtain a reference to the struct device a
+ * struct fwnode_handle is associated with.
+ * @fwnode: The pointer to the struct fwnode_handle whose associated struct
+ * device to take a reference on.
+ *
+ * This function takes a reference on the device that the device pointer
+ * embedded in the struct fwnode_handle points to.
+ *
+ * Note that the struct device pointer embedded in struct fwnode_handle does
+ * *not* itself hold a reference on the struct device.
+ *
+ * Hence, it is a use-after-free (and thus a bug) to call this function unless
+ * the caller can guarantee that the last reference to the corresponding
+ * struct device is not dropped concurrently.
+ *
+ * This matters because struct fwnode_handle has its own reference count and
+ * can therefore outlive the struct device it is associated with.
+ */
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
+{
+ return get_device(fwnode->dev);
+}
+EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
+
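A hedged sketch of the lifetime rule described above: the caller must already hold something that excludes the final put_device(). The subsystem mutex foo_lock below is hypothetical and assumed to also be held across device removal.

#include <linux/device.h>
#include <linux/fwnode.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);	/* hypothetical; held across device removal */

static struct device *foo_get_dev(struct fwnode_handle *fwnode)
{
	struct device *dev;

	mutex_lock(&foo_lock);
	dev = get_dev_from_fwnode(fwnode);	/* safe: no concurrent free */
	mutex_unlock(&foo_lock);

	return dev;	/* caller owns a reference; drop with put_device() */
}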
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
+int device_match_type(struct device *dev, const void *type)
+{
+ return dev->type == type;
+}
+EXPORT_SYMBOL_GPL(device_match_type);
+
int device_match_of_node(struct device *dev, const void *np)
{
- return dev->of_node == np;
+ return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode;
+ return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
@@ -5195,13 +5335,13 @@ EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
- return ACPI_COMPANION(dev) == adev;
+ return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
- return ACPI_HANDLE(dev) == handle;
+ return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
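The NULL checks added to the match helpers above change lookup semantics: a NULL cookie no longer matches devices whose corresponding field is also NULL. A hedged sketch of the affected call pattern; the choice of platform_bus_type and the wrapper name are hypothetical.

#include <linux/device/bus.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static struct device *foo_find_by_node(struct device_node *np)
{
	/* Returns a referenced device or NULL; np == NULL now finds nothing. */
	return bus_find_device(&platform_bus_type, NULL, np,
			       device_match_of_node);
}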
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index c61ecb0c2ae2..c6c57b6f61c6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -26,7 +26,7 @@
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
-static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+static int cpu_subsys_match(struct device *dev, const struct device_driver *drv)
{
/* ACPI style match is the only one that may succeed. */
if (acpi_driver_match_device(dev, drv))
@@ -95,6 +95,7 @@ void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
+ set_cpu_enabled(logical_cpu, false);
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
device_unregister(&cpu->dev);
@@ -273,6 +274,13 @@ static ssize_t print_cpus_offline(struct device *dev,
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static ssize_t print_cpus_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(cpu_enabled_mask));
+}
+static DEVICE_ATTR(enabled, 0444, print_cpus_enabled, NULL);
+
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -292,13 +300,30 @@ static ssize_t print_cpus_isolated(struct device *dev,
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+static ssize_t housekeeping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *hk_mask;
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
+
+ if (housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(hk_mask));
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(housekeeping);
+
#ifdef CONFIG_NO_HZ_FULL
-static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nohz_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+ if (cpumask_available(tick_nohz_full_mask))
+ return sysfs_emit(buf, "%*pbl\n",
+ cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "\n");
}
-static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+static DEVICE_ATTR_RO(nohz_full);
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -308,7 +333,7 @@ static ssize_t crash_hotplug_show(struct device *dev,
{
return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
-static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
+static DEVICE_ATTR_RO(crash_hotplug);
#endif
static void cpu_device_release(struct device *dev)
@@ -317,7 +342,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
- * of this code has already been publically ridiculed for doing
+ * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
@@ -413,6 +438,7 @@ int register_cpu(struct cpu *cpu, int num)
register_cpu_under_node(num, cpu_to_node(num));
dev_pm_qos_expose_latency_limit(&cpu->dev,
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+ set_cpu_enabled(num, true);
return 0;
}
@@ -494,7 +520,9 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+ &dev_attr_enabled.attr,
&dev_attr_isolated.attr,
+ &dev_attr_housekeeping.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
@@ -558,7 +586,7 @@ static void __init cpu_dev_register_generic(void)
for_each_present_cpu(i) {
ret = arch_register_cpu(i);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
pr_warn("register_cpu %d failed (%d)\n", i, ret);
}
}
@@ -589,6 +617,11 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -604,6 +637,11 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -620,6 +658,11 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
+ &dev_attr_old_microcode.attr,
+ &dev_attr_indirect_target_selection.attr,
+ &dev_attr_tsa.attr,
+ &dev_attr_vmscape.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 83d352394fdf..349f31bedfa1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/slab.h>
@@ -192,7 +193,7 @@ void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- queue_work(system_unbound_wq, &deferred_probe_work);
+ queue_work(system_dfl_wq, &deferred_probe_work);
}
/**
@@ -248,7 +249,7 @@ static int deferred_devs_show(struct seq_file *s, void *data)
list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
seq_printf(s, "%s\t%s", dev_name(curr->device),
- curr->device->p->deferred_probe_reason ?: "\n");
+ curr->deferred_probe_reason ?: "\n");
mutex_unlock(&deferred_probe_mutex);
@@ -393,6 +394,7 @@ bool device_is_bound(struct device *dev)
{
return dev->p && klist_node_attached(&dev->p->knode_driver);
}
+EXPORT_SYMBOL_GPL(device_is_bound);
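device_is_bound() is now exported to modules, but it must still be called under the device lock. A hedged sketch (the caller is hypothetical):

#include <linux/device.h>

static bool foo_dev_is_ready(struct device *dev)
{
	bool bound;

	device_lock(dev);	/* device_is_bound() requires the device lock */
	bound = device_is_bound(dev);
	device_unlock(dev);

	return bound;
}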
static void driver_bound(struct device *dev)
{
@@ -549,8 +551,9 @@ static void device_unbind_cleanup(struct device *dev)
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
+ dev_pm_domain_detach(dev, dev->power.detach_power_off);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
@@ -568,7 +571,7 @@ static void device_remove(struct device *dev)
dev->driver->remove(dev);
}
-static int call_driver_probe(struct device *dev, struct device_driver *drv)
+static int call_driver_probe(struct device *dev, const struct device_driver *drv)
{
int ret = 0;
@@ -599,7 +602,7 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv)
return ret;
}
-static int really_probe(struct device *dev, struct device_driver *drv)
+static int really_probe(struct device *dev, const struct device_driver *drv)
{
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
@@ -628,7 +631,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
}
re_probe:
- dev->driver = drv;
+ device_set_driver(dev, drv);
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -727,7 +730,7 @@ done:
/*
* For initcall_debug, show the driver probe time.
*/
-static int really_probe_debug(struct device *dev, struct device_driver *drv)
+static int really_probe_debug(struct device *dev, const struct device_driver *drv)
{
ktime_t calltime, rettime;
int ret;
@@ -774,7 +777,7 @@ void wait_for_device_probe(void)
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
-static int __driver_probe_device(struct device_driver *drv, struct device *dev)
+static int __driver_probe_device(const struct device_driver *drv, struct device *dev)
{
int ret = 0;
@@ -819,7 +822,7 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
*
* If the device has a parent, runtime-resume the parent before driver probing.
*/
-static int driver_probe_device(struct device_driver *drv, struct device *dev)
+static int driver_probe_device(const struct device_driver *drv, struct device *dev)
{
int trigger_count = atomic_read(&deferred_trigger_count);
int ret;
@@ -863,7 +866,7 @@ static int __init save_async_options(char *buf)
}
__setup("driver_async_probe=", save_async_options);
-static bool driver_allows_async_probing(struct device_driver *drv)
+static bool driver_allows_async_probing(const struct device_driver *drv)
{
switch (drv->probe_type) {
case PROBE_PREFER_ASYNCHRONOUS:
@@ -1012,7 +1015,7 @@ static int __device_attach(struct device *dev, bool allow_async)
if (ret == 0)
ret = 1;
else {
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
ret = 0;
}
} else {
@@ -1074,7 +1077,15 @@ EXPORT_SYMBOL_GPL(device_attach);
void device_initial_probe(struct device *dev)
{
- __device_attach(dev, true);
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
+ if (sp->drivers_autoprobe)
+ __device_attach(dev, true);
+
+ subsys_put(sp);
}
/*
@@ -1117,7 +1128,7 @@ static void __device_driver_unlock(struct device *dev, struct device *parent)
* Manually attach driver to a device. Will acquire both @dev lock and
* @dev->parent lock if needed. Returns 0 on success, -ERR on failure.
*/
-int device_driver_attach(struct device_driver *drv, struct device *dev)
+int device_driver_attach(const struct device_driver *drv, struct device *dev)
{
int ret;
@@ -1137,7 +1148,7 @@ EXPORT_SYMBOL_GPL(device_driver_attach);
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
struct device *dev = _dev;
- struct device_driver *drv;
+ const struct device_driver *drv;
int ret;
__device_driver_lock(dev, dev->parent);
@@ -1153,7 +1164,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
static int __driver_attach(struct device *dev, void *data)
{
- struct device_driver *drv = data;
+ const struct device_driver *drv = data;
bool async = false;
int ret;
@@ -1226,9 +1237,10 @@ static int __driver_attach(struct device *dev, void *data)
* returns 0 and the @dev->driver is set, we've found a
* compatible pair.
*/
-int driver_attach(struct device_driver *drv)
+int driver_attach(const struct device_driver *drv)
{
- return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
+ /* The (void *) will be put back to const * in __driver_attach() */
+ return bus_for_each_dev(drv->bus, NULL, (void *)drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
@@ -1284,7 +1296,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
}
void device_release_driver_internal(struct device *dev,
- struct device_driver *drv,
+ const struct device_driver *drv,
struct device *parent)
{
__device_driver_lock(dev, parent);
@@ -1333,7 +1345,7 @@ void device_driver_detach(struct device *dev)
* driver_detach - detach driver from all devices it controls.
* @drv: driver.
*/
-void driver_detach(struct device_driver *drv)
+void driver_detach(const struct device_driver *drv)
{
struct device_private *dev_prv;
struct device *dev;
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 82aeb09b3d1b..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -18,58 +18,51 @@ static struct class devcd_class;
/* global disable flag, for security purposes */
static bool devcd_disabled;
-/* if data isn't read by userspace after 5 minutes then delete it */
-#define DEVCD_TIMEOUT (HZ * 60 * 5)
-
struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
- *
- *
+ * There are 2 races for which mutex is required.
*
- * cpu0(X) cpu1(Y)
+ * The first race is between device creation and userspace writing to
+ * the coredump to schedule immediate destruction.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * del_timer()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
+ * That way the timer can never destroy the device before device_add()
+ * has completed: devcd_del() does nothing while init_completed is not
+ * set, and on failure the timer is cancelled instead.
*
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+ * The second race involves multiple parallel invocations of devcd_free();
+ * a deleted flag ensures that only one of them calls the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+ * If nothing interferes and device_add() returns success,
+ * del_wk will destroy the device after the timer fires.
+ *
+ * Multiple userspace processes can interfere with the timer:
+ * - Writing to the coredump will reschedule the timer to run
+ * immediately, if it is still armed.
+ *
+ * This is handled with "if (cancel_delayed_work()) {
+ * schedule_delayed_work() }", which avoids re-arming the work
+ * after it has already fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+ * This is handled by using disable_delayed_work_sync(), and then
+ * checking whether the deleted flag is set with &devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -98,18 +91,31 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout() */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -119,30 +125,26 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+ * Although it's tempting to use mod_delayed_work() here, that
+ * would reschedule the work even if the timer has already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
-static struct bin_attribute devcd_attr_data = {
- .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
- .size = 0,
- .read = devcd_data_read,
- .write = devcd_data_write,
-};
+static const struct bin_attribute devcd_attr_data =
+ __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0);
-static struct bin_attribute *devcd_dev_bin_attrs[] = {
+static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
@@ -158,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+ * To prevent a race with devcd_data_write(), disable the work
+ * and complete the removal manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -186,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
- * In the above diagram, It looks like disabled_store() would be racing with parallely
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * In the above diagram, it looks like disabled_store() would race with a concurrently
+ * running devcd_del() and cause a memory abort after dropping its last reference with
 * put_device(). However, this does not happen, as fn(dev, data) runs
 * with its own reference to the device via klist_node, so that is not the last
 * reference and the above situation cannot occur.
@@ -288,6 +298,8 @@ static void devcd_free_sgtable(void *data)
* @offset: start copy from @offset@ bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
+ *
+ * Returns: the number of bytes copied
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
@@ -328,7 +340,8 @@ void dev_coredump_put(struct device *dev)
EXPORT_SYMBOL_GPL(dev_coredump_put);
/**
- * dev_coredumpm - create device coredump with read/free methods
+ * dev_coredumpm_timeout - create device coredump with read/free methods and a
+ * custom timeout.
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
* @data: data cookie for the @read/@free functions
@@ -336,17 +349,20 @@ EXPORT_SYMBOL_GPL(dev_coredump_put);
* @gfp: allocation flags
* @read: function to read from the given buffer
* @free: function to free the given buffer
+ * @timeout: time in jiffies after which the coredump is removed
*
* Creates a new device coredump for the given device. If a previous one hasn't
* been read yet, the new coredump is discarded. The data lifetime is determined
* by the device coredump framework and when it is no longer needed the @free
* function will be called to free the data.
*/
-void dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data))
+void dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout)
{
static atomic_t devcd_count = ATOMIC_INIT(0);
struct devcd_entry *devcd;
@@ -375,7 +391,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -384,8 +400,14 @@ void dev_coredumpm(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() from completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -402,19 +424,26 @@ void dev_coredumpm(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
free(data);
}
-EXPORT_SYMBOL_GPL(dev_coredumpm);
+EXPORT_SYMBOL_GPL(dev_coredumpm_timeout);
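A hedged usage sketch for the new timeout variant; the foo_* read/free pair, the caller, and the 10-minute value are all hypothetical.

#include <linux/devcoredump.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static ssize_t foo_cd_read(char *buffer, loff_t offset, size_t count,
			   void *data, size_t datalen)
{
	return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}

static void foo_cd_free(void *data)
{
	vfree(data);	/* dump buffer was vmalloc()ed by the driver */
}

static void foo_report_crash(struct device *dev, void *dump, size_t len)
{
	/* devcoredump takes ownership of dump; freed via foo_cd_free() */
	dev_coredumpm_timeout(dev, THIS_MODULE, dump, len, GFP_KERNEL,
			      foo_cd_read, foo_cd_free, 10 * 60 * HZ);
}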
/**
* dev_coredumpsg - create device coredump that uses scatterlist as data
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 3df0025d12aa..f54db6d138ab 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -85,7 +85,7 @@ static void group_close_release(struct device *dev, void *res)
/* noop */
}
-static struct devres_group * node_to_group(struct devres_node *node)
+static struct devres_group *node_to_group(struct devres_node *node)
{
if (node->release == &group_open_release)
return container_of(node, struct devres_group, node[0]);
@@ -107,8 +107,8 @@ static bool check_dr_size(size_t size, size_t *tot_size)
return true;
}
-static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp, int nid)
+static __always_inline struct devres *alloc_dr(dr_release_t release,
+ size_t size, gfp_t gfp, int nid)
{
size_t tot_size;
struct devres *dr;
@@ -283,8 +283,8 @@ static struct devres *find_dr(struct device *dev, dr_release_t release,
* RETURNS:
* Pointer to found devres, NULL if not found.
*/
-void * devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -313,8 +313,8 @@ EXPORT_SYMBOL_GPL(devres_find);
* RETURNS:
* Pointer to found or added devres.
*/
-void * devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data)
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data)
{
struct devres *new_dr = container_of(new_res, struct devres, data);
struct devres *dr;
@@ -349,8 +349,8 @@ EXPORT_SYMBOL_GPL(devres_get);
* RETURNS:
* Pointer to removed devres on success, NULL if not found.
*/
-void * devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -549,7 +549,7 @@ int devres_release_all(struct device *dev)
* RETURNS:
* ID of the new group, NULL on failure.
*/
-void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
grp->id = grp;
if (id)
grp->id = id;
+ grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
@@ -575,8 +576,11 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devres_open_group);
-/* Find devres group with ID @id. If @id is NULL, look for the latest. */
-static struct devres_group * find_group(struct device *dev, void *id)
+/*
+ * Find devres group with ID @id. If @id is NULL, look for the latest open
+ * group.
+ */
+static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -686,6 +690,13 @@ int devres_release_group(struct device *dev, void *id)
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
+ } else if (list_empty(&dev->devres_head)) {
+ /*
+ * dev is probably dying via devres_release_all(): groups
+ * have already been removed and are in the process of
+ * being released - don't touch and don't warn.
+ */
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
@@ -748,26 +759,50 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
}
EXPORT_SYMBOL_GPL(__devm_add_action);
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
+
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * matching entry is found.
+ *
+ * This should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres rust abstraction.
+ *
+ * Causing the warning from regular driver code most likely indicates an abuse
+ * of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no matching entry is found.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
- &devres));
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
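A hedged sketch of the intended use from an object with its own lifetime management; all foo_* names are hypothetical. If devres already ran the action, -ENOENT is expected and simply means there is nothing left to do (devm_is_action_added() above can likewise check for the entry without removing it).

#include <linux/device.h>

static void foo_teardown(void *data)
{
	/* release whatever foo state is referenced by data */
}

static void foo_object_drop(struct device *dev, void *obj)
{
	/* Run teardown exactly once, whether or not devres got there first. */
	if (devm_remove_action_nowarn(dev, foo_teardown, obj) == 0)
		foo_teardown(obj);
}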
/**
* devm_release_action() - release previously added custom action
@@ -896,9 +931,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
/*
* Otherwise: allocate new, larger chunk. We need to allocate before
* taking the lock as most probably the caller uses GFP_KERNEL.
+ * alloc_dr() will call check_dr_size() to reserve extra memory
+ * for struct devres automatically, so the user-requested size
+ * @new_size is passed to it directly, just as devm_kmalloc() does.
*/
new_dr = alloc_dr(devm_kmalloc_release,
- total_new_size, gfp, dev_to_node(dev));
+ new_size, gfp, dev_to_node(dev));
if (!new_dr)
return NULL;
@@ -949,17 +987,10 @@ EXPORT_SYMBOL_GPL(devm_krealloc);
*/
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
- size_t size;
- char *buf;
-
if (!s)
return NULL;
- size = strlen(s) + 1;
- buf = devm_kmalloc(dev, size, gfp);
- if (buf)
- memcpy(buf, s, size);
- return buf;
+ return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
@@ -1086,6 +1117,27 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: the source address if it is located in .rodata, otherwise the
+ * return value of devm_kmemdup(), to which the function falls back.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
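A hedged sketch (struct and caller hypothetical): code that may receive either a compile-time-const descriptor or a runtime-built one can use this helper to avoid copying data that already lives in .rodata.

#include <linux/device.h>
#include <linux/slab.h>

struct foo_pdata {		/* hypothetical descriptor */
	int threshold;
};

static const struct foo_pdata *
foo_dup_pdata(struct device *dev, const struct foo_pdata *src)
{
	/* Returns src unchanged if it lives in .rodata, a devres copy if not. */
	return devm_kmemdup_const(dev, src, sizeof(*src), GFP_KERNEL);
}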
+
struct pages_devres {
unsigned long addr;
unsigned int order;
@@ -1170,13 +1222,6 @@ static void devm_percpu_release(struct device *dev, void *pdata)
free_percpu(p);
}
-static int devm_percpu_match(struct device *dev, void *data, void *p)
-{
- struct devres *devr = container_of(data, struct devres, data);
-
- return *(void **)devr->data == p;
-}
-
/**
* __devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
@@ -1212,17 +1257,3 @@ void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
-
-/**
- * devm_free_percpu - Resource-managed free_percpu
- * @dev: Device this memory belongs to
- * @pdata: Per-cpu memory to free
- *
- * Free memory allocated with devm_alloc_percpu().
- */
-void devm_free_percpu(struct device *dev, void __percpu *pdata)
-{
- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
- (__force void *)pdata));
-}
-EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..194b44075ac7 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -86,12 +70,43 @@ static struct file_system_type internal_fs_type = {
#else
.init_fs_context = ramfs_init_fs_context,
#endif
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying fs context initialization, then switch to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode, NULL);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
- done_path_create(&path, dentry);
- return err;
+ end_creating_path(&path, dentry);
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -208,16 +222,16 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
- dev->devt);
+ dev->devt, NULL);
if (!err) {
struct iattr newattrs;
@@ -232,7 +246,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return err;
}
@@ -242,21 +256,16 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- dentry = kern_path_locked(name, &parent);
+ dentry = start_removing_path(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
- path_put(&parent);
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ else
+ err = -EPERM;
+
+ end_removing_path(&parent, dentry);
return err;
}
@@ -285,7 +294,7 @@ static int delete_path(const char *nodepath)
return err;
}
-static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
+static int dev_mynode(struct device *dev, struct inode *inode)
{
/* did we create it */
if (inode->i_private != &thread)
@@ -293,13 +302,13 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
/* does the dev_t match */
if (is_blockdev(dev)) {
- if (!S_ISBLK(stat->mode))
+ if (!S_ISBLK(inode->i_mode))
return 0;
} else {
- if (!S_ISCHR(stat->mode))
+ if (!S_ISCHR(inode->i_mode))
return 0;
}
- if (stat->rdev != dev->devt)
+ if (inode->i_rdev != dev->devt)
return 0;
/* ours */
@@ -310,44 +319,36 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct inode *inode;
int deleted = 0;
- int err;
+ int err = 0;
- dentry = kern_path_locked(nodename, &parent);
+ dentry = start_removing_path(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ inode = d_inode(dentry);
+ if (dev_mynode(dev, inode)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = inode->i_mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
+ end_removing_path(&parent, dentry);
- path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
@@ -443,6 +444,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +482,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c8436c26ed6a..8ab010ddf709 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override);
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct klist_iter i;
struct device *dev;
@@ -148,9 +148,9 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
-struct device *driver_find_device(struct device_driver *drv,
+struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct klist_iter i;
struct device *dev;
@@ -160,9 +160,12 @@ struct device *driver_find_device(struct device_driver *drv,
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
return dev;
}
@@ -173,7 +176,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
* @drv: driver.
* @attr: driver attribute descriptor.
*/
-int driver_create_file(struct device_driver *drv,
+int driver_create_file(const struct device_driver *drv,
const struct driver_attribute *attr)
{
int error;
@@ -191,7 +194,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
* @drv: driver.
* @attr: driver attribute descriptor.
*/
-void driver_remove_file(struct device_driver *drv,
+void driver_remove_file(const struct device_driver *drv,
const struct driver_attribute *attr)
{
if (drv)
@@ -199,13 +202,13 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-int driver_add_groups(struct device_driver *drv,
+int driver_add_groups(const struct device_driver *drv,
const struct attribute_group **groups)
{
return sysfs_create_groups(&drv->p->kobj, groups);
}
-void driver_remove_groups(struct device_driver *drv,
+void driver_remove_groups(const struct device_driver *drv,
const struct attribute_group **groups)
{
sysfs_remove_groups(&drv->p->kobj, groups);
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
new file mode 100644
index 000000000000..21dd02124231
--- /dev/null
+++ b/drivers/base/faux.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to deal with bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/container_of.h>
+#include <linux/device/faux.h>
+#include "base.h"
+
+/*
+ * Internal wrapper structure so we can hold a pointer to the
+ * faux_device_ops for this device.
+ */
+struct faux_object {
+ struct faux_device faux_dev;
+ const struct faux_device_ops *faux_ops;
+ const struct attribute_group **groups;
+};
+#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
+
+static struct device faux_bus_root = {
+ .init_name = "faux",
+};
+
+static int faux_match(struct device *dev, const struct device_driver *drv)
+{
+ /* Match always succeeds, we only have one driver */
+ return 1;
+}
+
+static int faux_probe(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ int ret;
+
+ if (faux_ops && faux_ops->probe) {
+ ret = faux_ops->probe(faux_dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Add groups after the probe succeeds to ensure resources are
+ * initialized correctly
+ */
+ ret = device_add_groups(dev, faux_obj->groups);
+ if (ret && faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+
+ return ret;
+}
+
+static void faux_remove(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+
+ device_remove_groups(dev, faux_obj->groups);
+
+ if (faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+}
+
+static const struct bus_type faux_bus_type = {
+ .name = "faux",
+ .match = faux_match,
+ .probe = faux_probe,
+ .remove = faux_remove,
+};
+
+static struct device_driver faux_driver = {
+ .name = "faux_driver",
+ .bus = &faux_bus_type,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
+};
+
+static void faux_device_release(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+
+ kfree(faux_obj);
+}
+
+/**
+ * faux_device_create_with_groups - Create and register with the driver
+ * core a faux device and populate the device with an initial
+ * set of sysfs attributes.
+ * @name: The name of the device we are adding, must be unique for
+ * all faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ * @groups: The set of sysfs attributes that will be created for this
+ * device when it is registered with the driver core.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, the callbacks in @faux_ops will be called with the device
+ * so that the caller can act on it at the proper points in the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time. If the
+ * probe callback (if one is present) does NOT succeed, the creation of the
+ * device will fail and NULL will be returned.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups)
+{
+ struct faux_object *faux_obj;
+ struct faux_device *faux_dev;
+ struct device *dev;
+ int ret;
+
+ faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL);
+ if (!faux_obj)
+ return NULL;
+
+ /* Save off the callbacks and groups so we can use them in the future */
+ faux_obj->faux_ops = faux_ops;
+ faux_obj->groups = groups;
+
+ /* Initialize the device portion and register it with the driver core */
+ faux_dev = &faux_obj->faux_dev;
+ dev = &faux_dev->dev;
+
+ device_initialize(dev);
+ dev->release = faux_device_release;
+ if (parent)
+ dev->parent = parent;
+ else
+ dev->parent = &faux_bus_root;
+ dev->bus = &faux_bus_type;
+ dev_set_name(dev, "%s", name);
+ device_set_pm_not_required(dev);
+
+ ret = device_add(dev);
+ if (ret) {
+ pr_err("%s: device_add for faux device '%s' failed with %d\n",
+ __func__, name, ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ /*
+ * Verify that we did bind the driver to the device (i.e. probe worked);
+ * if not, fail the creation, since it is almost impossible for the
+ * caller to determine whether probe was successful.
+ */
+ if (!dev->driver) {
+ dev_dbg(dev, "probe did not succeed, tearing down the device\n");
+ faux_device_destroy(faux_dev);
+ faux_dev = NULL;
+ }
+
+ return faux_dev;
+}
+EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
+
+/**
+ * faux_device_create - create and register with the driver core a faux device
+ * @name: The name of the device we are adding, must be unique for all
+ * faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, the callbacks in @faux_ops will be called with the device
+ * so that the caller can act on it at the proper points in the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops)
+{
+ return faux_device_create_with_groups(name, parent, faux_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(faux_device_create);
+
+/**
+ * faux_device_destroy - destroy a faux device
+ * @faux_dev: faux device to destroy
+ *
+ * Unregisters and cleans up a device that was created with a call to
+ * faux_device_create()
+ */
+void faux_device_destroy(struct faux_device *faux_dev)
+{
+ struct device *dev;
+
+ if (!faux_dev)
+ return;
+
+ dev = &faux_dev->dev;
+
+ device_del(dev);
+
+ /* The final put_device() will clean up the memory we allocated for this device. */
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(faux_device_destroy);
+
+int __init faux_bus_init(void)
+{
+ int ret;
+
+ ret = device_register(&faux_bus_root);
+ if (ret) {
+ put_device(&faux_bus_root);
+ return ret;
+ }
+
+ ret = bus_register(&faux_bus_type);
+ if (ret)
+ goto error_bus;
+
+ ret = driver_register(&faux_driver);
+ if (ret)
+ goto error_driver;
+
+ return ret;
+
+error_driver:
+ bus_unregister(&faux_bus_type);
+
+error_bus:
+ device_unregister(&faux_bus_root);
+ return ret;
+}
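A hedged end-to-end sketch of the API this new file introduces; the module, the "foo" device name, and all foo_* identifiers are hypothetical.

#include <linux/device.h>
#include <linux/device/faux.h>
#include <linux/module.h>

static int foo_faux_probe(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "faux device bound\n");
	return 0;
}

static const struct faux_device_ops foo_faux_ops = {
	.probe = foo_faux_probe,
};

static struct faux_device *foo_fdev;

static int __init foo_init(void)
{
	/* NULL parent places the device under the faux root in sysfs */
	foo_fdev = faux_device_create("foo", NULL, &foo_faux_ops);
	return foo_fdev ? 0 : -ENODEV;
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	faux_device_destroy(foo_fdev);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");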
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 5ca00e02fe82..15eff8a4b505 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -3,8 +3,7 @@ menu "Firmware loader"
config FW_LOADER
tristate "Firmware loading facility" if EXPERT
- select CRYPTO_HASH if FW_LOADER_DEBUG
- select CRYPTO_SHA256 if FW_LOADER_DEBUG
+ select CRYPTO_LIB_SHA256 if FW_LOADER_DEBUG
default y
help
This enables the firmware loading facility in the kernel. The kernel
@@ -28,7 +27,6 @@ config FW_LOADER
config FW_LOADER_DEBUG
bool "Log filenames and checksums for loaded firmware"
- depends on CRYPTO = FW_LOADER || CRYPTO=y
depends on DYNAMIC_DEBUG
depends on FW_LOADER
default FW_LOADER
@@ -37,6 +35,13 @@ config FW_LOADER_DEBUG
SHA256 checksums to the kernel log for each firmware file that is
loaded.
+config RUST_FW_LOADER_ABSTRACTIONS
+ bool "Rust Firmware Loader abstractions"
+ depends on RUST
+ select FW_LOADER
+ help
+ This enables the Rust abstractions for the firmware loader API.
+
if FW_LOADER
config FW_LOADER_PAGED_BUF
diff --git a/drivers/base/firmware_loader/builtin/main.c b/drivers/base/firmware_loader/builtin/main.c
index a065c3150897..d36befebb1b9 100644
--- a/drivers/base/firmware_loader/builtin/main.c
+++ b/drivers/base/firmware_loader/builtin/main.c
@@ -61,7 +61,7 @@ bool firmware_request_builtin(struct firmware *fw, const char *name)
return false;
}
-EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");
/**
* firmware_request_builtin_buf() - load builtin firmware into optional buffer
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 8432ab2c3b3c..c8afc501a8a4 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -22,10 +22,10 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");
#ifdef CONFIG_SYSCTL
-static struct ctl_table firmware_config_table[] = {
+static const struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -56,13 +56,13 @@ int register_firmware_config_sysctl(void)
return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
void unregister_firmware_config_sysctl(void)
{
unregister_sysctl_table(firmware_config_sysct_table_header);
firmware_config_sysct_table_header = NULL;
}
-EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index da8ca01d011c..4ebdca9e4da4 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -806,42 +806,15 @@ static void fw_abort_batch_reqs(struct firmware *fw)
}
#if defined(CONFIG_FW_LOADER_DEBUG)
-#include <crypto/hash.h>
#include <crypto/sha2.h>
static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
{
- struct shash_desc *shash;
- struct crypto_shash *alg;
- u8 *sha256buf;
- char *outbuf;
+ u8 digest[SHA256_DIGEST_SIZE];
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg))
- return;
-
- sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
- outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
- shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
- if (!sha256buf || !outbuf || !shash)
- goto out_free;
-
- shash->tfm = alg;
-
- if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_shash;
-
- for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
- sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
- outbuf[SHA256_BLOCK_SIZE] = 0;
- dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-
-out_shash:
- crypto_free_shash(alg);
-out_free:
- kfree(shash);
- kfree(outbuf);
- kfree(sha256buf);
+ sha256(fw->data, fw->size, digest);
+ dev_dbg(device, "Loaded FW: %s, sha256: %*phN\n",
+ name, SHA256_DIGEST_SIZE, digest);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
@@ -856,8 +829,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
- struct cred *kern_cred = NULL;
- const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -869,6 +840,25 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+
+ /*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from
+ * device-supplied strings, and we don't want some device to be
+ * able to tell us "I would like to be sent my firmware from
+ * ../../../etc/shadow, please".
+ *
+ * This intentionally only looks at the firmware name, not at
+ * the firmware base directory or at symlink contents.
+ */
+ if (name_contains_dotdot(name)) {
+ dev_warn(device,
+ "Firmware load for '%s' refused, path contains '..' component\n",
+ name);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags);
if (ret <= 0) /* error or already assigned */
@@ -879,45 +869,38 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
* called by a driver when serving an unrelated request from userland, we use
* the kernel credentials to read the file.
*/
- kern_cred = prepare_kernel_cred(&init_task);
- if (!kern_cred) {
- ret = -ENOMEM;
- goto out;
- }
- old_cred = override_creds(kern_cred);
+ scoped_with_kernel_creds() {
+ ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
- ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
-
- /* Only full reads can support decompression, platform, and sysfs. */
- if (!(opt_flags & FW_OPT_PARTIAL))
- nondirect = true;
+ /* Only full reads can support decompression, platform, and sysfs. */
+ if (!(opt_flags & FW_OPT_PARTIAL))
+ nondirect = true;
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
- if (ret == -ENOENT && nondirect)
- ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
- fw_decompress_zstd);
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
#endif
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
- if (ret == -ENOENT && nondirect)
- ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
- fw_decompress_xz);
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
+ fw_decompress_xz);
#endif
- if (ret == -ENOENT && nondirect)
- ret = firmware_fallback_platform(fw->priv);
+ if (ret == -ENOENT && nondirect)
+ ret = firmware_fallback_platform(fw->priv);
- if (ret) {
- if (!(opt_flags & FW_OPT_NO_WARN))
- dev_warn(device,
- "Direct firmware load for %s failed with error %d\n",
- name, ret);
- if (nondirect)
- ret = firmware_fallback_sysfs(fw, name, device,
- opt_flags, ret);
- } else
- ret = assign_fw(fw, device);
-
- revert_creds(old_cred);
- put_cred(kern_cred);
+ if (ret) {
+ if (!(opt_flags & FW_OPT_NO_WARN))
+ dev_warn(device,
+ "Direct firmware load for %s failed with error %d\n",
+ name, ret);
+ if (nondirect)
+ ret = firmware_fallback_sysfs(fw, name, device,
+ opt_flags, ret);
+ } else {
+ ret = assign_fw(fw, device);
+ }
+ }
out:
if (ret < 0) {
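
scoped_with_kernel_creds() packages the credential override that the removed lines open-coded; the revert happens automatically on every exit from the block. Roughly, per the removed lines (sketch):

	/*
	 *	old_cred = override_creds(kern_cred);
	 *	... firmware lookup under kernel credentials ...
	 *	revert_creds(old_cred);
	 *
	 * With the guard, early returns and gotos cannot leak the override.
	 */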
@@ -946,6 +929,8 @@ out:
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
+ * It must not contain any ".." path components - "foo/bar..bin" is
+ * allowed, but "foo/../bar.bin" is not.
*
* Caller must hold the reference count of @device.
*
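
A conforming caller, with a hypothetical firmware name (sketch):

	const struct firmware *fw;
	int ret;

	/* "vendor/chip-1.0.bin" is fine; "vendor/../chip.bin" now fails */
	ret = request_firmware(&fw, "vendor/chip-1.0.bin", &pdev->dev);
	if (ret)
		return ret;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);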
@@ -1045,8 +1030,8 @@ EXPORT_SYMBOL_GPL(firmware_request_platform);
/**
* firmware_request_cache() - cache firmware for suspend so resume can use it
- * @name: name of firmware file
* @device: device for which firmware should be cached for
+ * @name: name of firmware file
*
* There are some devices with an optimization that enables the device to not
* require loading firmware on system reboot. This optimization may still
@@ -1172,34 +1157,11 @@ static void request_firmware_work_func(struct work_struct *work)
kfree(fw_work);
}
-/**
- * request_firmware_nowait() - asynchronous version of request_firmware
- * @module: module requesting the firmware
- * @uevent: sends uevent to copy the firmware image if this flag
- * is non-zero else the firmware copy must be done manually.
- * @name: name of firmware file
- * @device: device for which firmware is being loaded
- * @gfp: allocation flags
- * @context: will be passed over to @cont, and
- * @fw may be %NULL if firmware request fails.
- * @cont: function will be called asynchronously when the firmware
- * request is over.
- *
- * Caller must hold the reference count of @device.
- *
- * Asynchronous variant of request_firmware() for user contexts:
- * - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
- *
- * - can't sleep at all if @gfp is GFP_ATOMIC.
- **/
-int
-request_firmware_nowait(
+
+static int _request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
- void (*cont)(const struct firmware *fw, void *context))
+ void (*cont)(const struct firmware *fw, void *context), bool nowarn)
{
struct firmware_work *fw_work;
@@ -1217,7 +1179,8 @@ request_firmware_nowait(
fw_work->context = context;
fw_work->cont = cont;
fw_work->opt_flags = FW_OPT_NOWAIT |
- (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+ (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER) |
+ (nowarn ? FW_OPT_NO_WARN : 0);
if (!uevent && fw_cache_is_setup(device, name)) {
kfree_const(fw_work->name);
@@ -1236,8 +1199,66 @@ request_firmware_nowait(
schedule_work(&fw_work->work);
return 0;
}
+
+/**
+ * request_firmware_nowait() - asynchronous version of request_firmware
+ * @module: module requesting the firmware
+ * @uevent: sends uevent to copy the firmware image if this flag
+ * is non-zero else the firmware copy must be done manually.
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Caller must hold the reference count of @device.
+ *
+ * Asynchronous variant of request_firmware() for user contexts:
+ * - sleep for as small periods as possible since it may
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
+ *
+ * - can't sleep at all if @gfp is GFP_ATOMIC.
+ **/
+int request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return _request_firmware_nowait(module, uevent, name, device, gfp,
+ context, cont, false);
+}
EXPORT_SYMBOL(request_firmware_nowait);
+/**
+ * firmware_request_nowait_nowarn() - async version of firmware_request_nowarn()
+ * @module: module requesting the firmware
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Similar in function to request_firmware_nowait(), but doesn't print a warning
+ * when the firmware file cannot be found, and always sends a uevent to copy
+ * the firmware image.
+ */
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return _request_firmware_nowait(module, FW_ACTION_UEVENT, name, device,
+ gfp, context, cont, true);
+}
+EXPORT_SYMBOL_GPL(firmware_request_nowait_nowarn);
+
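
Both async entry points end up in the same worker; a typical caller, with hypothetical names (sketch):

	static void my_fw_ready(const struct firmware *fw, void *context)
	{
		struct my_dev *md = context;

		if (!fw)
			return;			/* request failed */
		/* ... consume fw->data / fw->size ... */
		release_firmware(fw);
	}

	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
				      "vendor/chip.bin", dev, GFP_KERNEL,
				      md, my_fw_ready);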
#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
@@ -1555,16 +1576,20 @@ static int fw_pm_notify(struct notifier_block *notify_block,
}
/* stop caching firmware once syscore_suspend is reached */
-static int fw_suspend(void)
+static int fw_suspend(void *data)
{
fw_cache.state = FW_LOADER_NO_CACHE;
return 0;
}
-static struct syscore_ops fw_syscore_ops = {
+static const struct syscore_ops fw_syscore_ops = {
.suspend = fw_suspend,
};
+static struct syscore fw_syscore = {
+ .ops = &fw_syscore_ops,
+};
+
static int __init register_fw_pm_ops(void)
{
int ret;
@@ -1580,14 +1605,14 @@ static int __init register_fw_pm_ops(void)
if (ret)
return ret;
- register_syscore_ops(&fw_syscore_ops);
+ register_syscore(&fw_syscore);
return ret;
}
static inline void unregister_fw_pm_ops(void)
{
- unregister_syscore_ops(&fw_syscore_ops);
+ unregister_syscore(&fw_syscore);
unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
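
register_syscore() takes a wrapper object instead of bare ops, and the ->suspend() callback now carries a cookie argument. The registration pattern, mirroring the code above (sketch; the cookie plumbing is assumed):

	static int my_suspend(void *data)
	{
		/* quiesce state; @data is the per-instance cookie (assumed) */
		return 0;
	}

	static const struct syscore_ops my_syscore_ops = {
		.suspend = my_suspend,
	};

	static struct syscore my_syscore = {
		.ops = &my_syscore_ops,
	};

	/* register_syscore(&my_syscore); later unregister_syscore(&my_syscore); */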
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index c9c93b47d9a5..92e91050f96a 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -47,7 +47,10 @@ static ssize_t timeout_show(const struct class *class, const struct class_attrib
static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+ int tmp_loading_timeout;
+
+ if (kstrtoint(buf, 10, &tmp_loading_timeout))
+ return -EINVAL;
if (tmp_loading_timeout < 0)
tmp_loading_timeout = 0;
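
Unlike simple_strtol(), which silently stops at the first non-digit, kstrtoint() rejects malformed input outright; for example:

	int val;

	/* "42\n" -> 0, val == 42 (one trailing newline is tolerated) */
	/* "42x" or " 42" -> -EINVAL; "9999999999" -> -ERANGE        */
	if (kstrtoint(buf, 10, &val))
		return -EINVAL;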
@@ -157,7 +160,10 @@ static ssize_t firmware_loading_store(struct device *dev,
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
+ int loading;
+
+ if (kstrtoint(buf, 10, &loading))
+ return -EINVAL;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
@@ -259,7 +265,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -316,7 +322,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -356,7 +362,7 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data = {
+static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
.read = firmware_data_read,
@@ -374,7 +380,7 @@ static struct attribute *fw_dev_attrs[] = {
NULL
};
-static struct bin_attribute *fw_dev_bin_attrs[] = {
+static const struct bin_attribute *const fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 2060add8ef81..1cabea544a40 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -6,7 +6,7 @@
#include "firmware.h"
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+MODULE_IMPORT_NS("FIRMWARE_LOADER_PRIVATE");
extern struct firmware_fallback_config fw_fallback_config;
extern struct device_attribute dev_attr_loading;
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 829270067d16..c3797b93c5f5 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -100,8 +100,10 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&fwlp->lock);
- if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
- ret = -ENODEV;
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -ENODEV;
+ }
fwlp->ops->cancel(fwlp->fw_upload);
mutex_unlock(&fwlp->lock);
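
The same shape can be written with the cleanup.h scoped guard, which makes the early return safe by construction (sketch):

	scoped_guard(mutex, &fwlp->lock) {
		if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
			return -ENODEV;		/* lock dropped automatically */
		fwlp->ops->cancel(fwlp->fw_upload);
	}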
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c4954835128c..9d2b06d65dfc 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ faux_bus_init();
of_core_init();
platform_bus_init();
auxiliary_bus_init();
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index e23d0b49a793..bfd9215c9070 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -23,7 +23,7 @@ struct isa_dev {
#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
-static int isa_bus_match(struct device *dev, struct device_driver *driver)
+static int isa_bus_match(struct device *dev, const struct device_driver *driver)
{
struct isa_driver *isa_driver = to_isa_driver(driver);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 67858eeb92ed..751f248ca4a8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -22,6 +22,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>
+#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -48,22 +49,8 @@ int mhp_online_type_from_str(const char *str)
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
-static int sections_per_block;
-
-static inline unsigned long memory_block_id(unsigned long section_nr)
-{
- return section_nr / sections_per_block;
-}
-
-static inline unsigned long pfn_to_block_id(unsigned long pfn)
-{
- return memory_block_id(pfn_to_section_nr(pfn));
-}
-
-static inline unsigned long phys_to_block_id(unsigned long phys)
-{
- return pfn_to_block_id(PFN_DOWN(phys));
-}
+int sections_per_block;
+EXPORT_SYMBOL(sections_per_block);
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -110,6 +97,57 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
+
+/* Max block size to be set by memory_block_advise_max_size */
+static unsigned long memory_block_advised_size;
+static bool memory_block_advised_size_queried;
+
+/**
+ * memory_block_advise_max_size() - advise memory hotplug on the max suggested
+ * block size, usually for alignment.
+ * @size: suggestion for the maximum block size; must be a power of 2.
+ *
+ * Early boot software (pre-allocator init) may advise archs on the max block
+ * size. This value can only decrease after initialization, as the intent is
+ * to identify the largest supported alignment for all sources.
+ *
+ * Use of this value is arch-defined, as is min/max block size.
+ *
+ * Return: 0 on success
+ * -EINVAL if size is 0 or not pow2 aligned
+ * -EBUSY if value has already been probed
+ */
+int __init memory_block_advise_max_size(unsigned long size)
+{
+ if (!size || !is_power_of_2(size))
+ return -EINVAL;
+
+ if (memory_block_advised_size_queried)
+ return -EBUSY;
+
+ if (memory_block_advised_size)
+ memory_block_advised_size = min(memory_block_advised_size, size);
+ else
+ memory_block_advised_size = size;
+
+ return 0;
+}
+
+/**
+ * memory_block_advised_max_size() - query advised max hotplug block size.
+ *
+ * After the first call, the value can never change. Callers looking for the
+ * actual block size should use memory_block_size_bytes. This interface is
+ * intended for use by arch-init when initializing the hotplug block size.
+ *
+ * Return: advised size in bytes, or 0 if never set.
+ */
+unsigned long memory_block_advised_max_size(void)
+{
+ memory_block_advised_size_queried = true;
+ return memory_block_advised_size;
+}
+
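
A plausible arch-side consumer, entirely hypothetical (sketch):

	/* Clamp the arch default block size to any early advice. */
	static unsigned long __init pick_memory_block_size(void)
	{
		unsigned long size = SZ_2G;	/* illustrative arch default */
		unsigned long advised = memory_block_advised_max_size();

		return advised ? min(size, advised) : size;
	}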
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
@@ -160,15 +198,15 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
break;
default:
WARN_ON(1);
- return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
+ return sysfs_emit(buf, "ERROR-UNKNOWN-%d\n", mem->state);
}
return sysfs_emit(buf, "%s\n", output);
}
-int memory_notify(unsigned long val, void *v)
+int memory_notify(enum memory_block_state state, void *v)
{
- return blocking_notifier_call_chain(&memory_chain, val, v);
+ return blocking_notifier_call_chain(&memory_chain, state, v);
}
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
@@ -188,7 +226,6 @@ static int memory_block_online(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
- struct memory_notify arg;
struct zone *zone;
int ret;
@@ -208,19 +245,9 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
- arg.altmap_start_pfn = start_pfn;
- arg.altmap_nr_pages = nr_vmemmap_pages;
- arg.start_pfn = start_pfn + nr_vmemmap_pages;
- arg.nr_pages = nr_pages - nr_vmemmap_pages;
mem_hotplug_begin();
- ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
- ret = notifier_to_errno(ret);
- if (ret)
- goto out_notifier;
-
if (nr_vmemmap_pages) {
- ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
- zone, mem->altmap->inaccessible);
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
goto out;
}
@@ -242,11 +269,7 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
- mem_hotplug_done();
- return ret;
out:
- memory_notify(MEM_FINISH_OFFLINE, &arg);
-out_notifier:
mem_hotplug_done();
return ret;
}
@@ -259,7 +282,6 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
- struct memory_notify arg;
int ret;
if (!mem->zone)
@@ -291,11 +313,6 @@ static int memory_block_offline(struct memory_block *mem)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
- arg.altmap_start_pfn = start_pfn;
- arg.altmap_nr_pages = nr_vmemmap_pages;
- arg.start_pfn = start_pfn + nr_vmemmap_pages;
- arg.nr_pages = nr_pages - nr_vmemmap_pages;
- memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
mem_hotplug_done();
return ret;
@@ -455,7 +472,7 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_group *group = mem->group;
struct zone *default_zone;
int nid = mem->nid;
- int len = 0;
+ int len;
/*
* Check the existing zone. Make sure that we do that only on the
@@ -466,22 +483,18 @@ static ssize_t valid_zones_show(struct device *dev,
* If !mem->zone, the memory block spans multiple zones and
* cannot get offlined.
*/
- default_zone = mem->zone;
- if (!default_zone)
- return sysfs_emit(buf, "%s\n", "none");
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- goto out;
+ return sysfs_emit(buf, "%s\n",
+ mem->zone ? mem->zone->name : "none");
}
default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
start_pfn, nr_pages);
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ len = sysfs_emit(buf, "%s", default_zone->name);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
-out:
len += sysfs_emit_at(buf, len, "\n");
return len;
}
@@ -512,7 +525,7 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[mhp_default_online_type]);
+ online_type_to_str[mhp_get_default_online_type()]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -524,7 +537,7 @@ static ssize_t auto_online_blocks_store(struct device *dev,
if (online_type < 0)
return -EINVAL;
- mhp_default_online_type = online_type;
+ mhp_set_default_online_type(online_type);
return count;
}
@@ -636,7 +649,7 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
*
* Called under device_hotplug_lock.
*/
-static struct memory_block *find_memory_block_by_id(unsigned long block_id)
+struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
struct memory_block *mem;
@@ -735,21 +748,22 @@ static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
#ifdef CONFIG_NUMA
/**
- * memory_block_add_nid() - Indicate that system RAM falling into this memory
- * block device (partially) belongs to the given node.
+ * memory_block_add_nid_early() - Indicate that early system RAM falling into
+ * this memory block device (partially) belongs
+ * to the given node.
* @mem: The memory block device.
* @nid: The node id.
- * @context: The memory initialization context.
*
- * Indicate that system RAM falling into this memory block (partially) belongs
- * to the given node. If the context indicates ("early") that we are adding the
- * node during node device subsystem initialization, this will also properly
- * set/adjust mem->zone based on the zone ranges of the given node.
+ * Indicate that early system RAM falling into this memory block (partially)
+ * belongs to the given node. This will also properly set/adjust mem->zone based
+ * on the zone ranges of the given node.
+ *
+ * Memory hotplug handles this on memory block creation, where we can only have
+ * a single nid span a memory block.
*/
-void memory_block_add_nid(struct memory_block *mem, int nid,
- enum meminit_context context)
+void memory_block_add_nid_early(struct memory_block *mem, int nid)
{
- if (context == MEMINIT_EARLY && mem->nid != nid) {
+ if (mem->nid != nid) {
/*
* For early memory we have to determine the zone when setting
* the node id and handle multiple nodes spanning a single
@@ -763,19 +777,18 @@ void memory_block_add_nid(struct memory_block *mem, int nid,
mem->zone = early_node_zone_for_memory_block(mem, nid);
else
mem->zone = NULL;
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
}
-
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node. If we span multiple nodes (not applicable
- * to hotplugged memory), zone == NULL will prohibit memory offlining
- * and consequently unplug.
- */
- mem->nid = nid;
}
#endif
-static int add_memory_block(unsigned long block_id, unsigned long state,
+static int add_memory_block(unsigned long block_id, int nid, unsigned long state,
struct vmem_altmap *altmap,
struct memory_group *group)
{
@@ -793,7 +806,7 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- mem->nid = NUMA_NO_NODE;
+ mem->nid = nid;
mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
@@ -820,29 +833,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
return 0;
}
-static int __init add_boot_memory_block(unsigned long base_section_nr)
-{
- int section_count = 0;
- unsigned long nr;
-
- for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
- nr++)
- if (present_section_nr(nr))
- section_count++;
-
- if (section_count == 0)
- return 0;
- return add_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, NULL, NULL);
-}
-
-static int add_hotplug_memory_block(unsigned long block_id,
- struct vmem_altmap *altmap,
- struct memory_group *group)
-{
- return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
-}
-
static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
@@ -868,7 +858,7 @@ static void remove_memory_block(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- struct vmem_altmap *altmap,
+ int nid, struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
@@ -882,7 +872,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = add_hotplug_memory_block(block_id, altmap, group);
+ ret = add_memory_block(block_id, nid, MEM_OFFLINE, altmap, group);
if (ret)
break;
}
@@ -962,7 +952,7 @@ static const struct attribute_group *memory_root_attr_groups[] = {
void __init memory_dev_init(void)
{
int ret;
- unsigned long block_sz, nr;
+ unsigned long block_sz, block_id, nr;
/* Validate the configured memory block size */
block_sz = memory_block_size_bytes();
@@ -975,15 +965,23 @@ void __init memory_dev_init(void)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
- * Create entries for memory sections that were found
- * during boot and have been initialized
+ * Create entries for memory sections that were found during boot
+ * and have been initialized. Use @block_id to track the last
+ * handled block and initialize it to an invalid value (ULONG_MAX)
+ * to bypass the block ID matching check for the first present
+ * block so that it can be covered.
*/
- for (nr = 0; nr <= __highest_present_section_nr;
- nr += sections_per_block) {
- ret = add_boot_memory_block(nr);
- if (ret)
- panic("%s() failed to add memory block: %d\n", __func__,
- ret);
+ block_id = ULONG_MAX;
+ for_each_present_section_nr(0, nr) {
+ if (block_id != ULONG_MAX && memory_block_id(nr) == block_id)
+ continue;
+
+ block_id = memory_block_id(nr);
+ ret = add_memory_block(block_id, NUMA_NO_NODE, MEM_ONLINE, NULL, NULL);
+ if (ret) {
+ panic("%s() failed to add memory block: %d\n",
+ __func__, ret);
+ }
}
}
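
For intuition: memory_block_id() is simply section_nr / sections_per_block (the helper moved out of this file but keeps that mapping), so consecutive present sections in the same block hit the dedup check and only the first one creates the device:

	/* e.g. sections_per_block == 16:
	 *   nr =  0..15 -> block 0 (device created once, at the first hit)
	 *   nr = 16..31 -> block 1, and so on
	 */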
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 46ad4d636731..218aaa096455 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -9,7 +9,7 @@
#include <linux/string.h>
#include "base.h"
-static char *make_driver_name(struct device_driver *drv)
+static char *make_driver_name(const struct device_driver *drv)
{
char *driver_name;
@@ -30,46 +30,67 @@ static void module_create_drivers_dir(struct module_kobject *mk)
mutex_unlock(&drivers_dir_mutex);
}
-void module_add_driver(struct module *mod, struct device_driver *drv)
+int module_add_driver(struct module *mod, const struct device_driver *drv)
{
char *driver_name;
- int no_warn;
struct module_kobject *mk = NULL;
+ int ret;
if (!drv)
- return;
+ return 0;
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(module_kset, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
+ /* Lookup or create built-in module entry in /sys/modules */
+ mk = lookup_or_create_module_kobject(drv->mod_name);
+ if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
+ /* lookup_or_create_module_kobject took a reference */
+ kobject_put(&mk->kobj);
}
}
if (!mk)
- return;
+ return 0;
+
+ ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+ if (ret)
+ return ret;
- /* Don't check return codes; these calls are idempotent */
- no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
driver_name = make_driver_name(drv);
- if (driver_name) {
- module_create_drivers_dir(mk);
- no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
- driver_name);
- kfree(driver_name);
+ if (!driver_name) {
+ ret = -ENOMEM;
+ goto out_remove_kobj;
}
+
+ module_create_drivers_dir(mk);
+ if (!mk->drivers_dir) {
+ ret = -EINVAL;
+ goto out_free_driver_name;
+ }
+
+ ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
+ if (ret)
+ goto out_remove_drivers_dir;
+
+ kfree(driver_name);
+
+ return 0;
+
+out_remove_drivers_dir:
+ sysfs_remove_link(mk->drivers_dir, driver_name);
+
+out_free_driver_name:
+ kfree(driver_name);
+
+out_remove_kobj:
+ sysfs_remove_link(&drv->p->kobj, "module");
+ return ret;
}
-void module_remove_driver(struct device_driver *drv)
+void module_remove_driver(const struct device_driver *drv)
{
struct module_kobject *mk = NULL;
char *driver_name;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eb72580288e6..00cf4532f121 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
@@ -20,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
static const struct bus_type node_subsys = {
.name = "node",
@@ -27,7 +29,7 @@ static const struct bus_type node_subsys = {
};
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -45,10 +47,10 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -66,7 +68,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
@@ -110,6 +112,27 @@ static const struct attribute_group *node_access_node_groups[] = {
NULL,
};
+#ifdef CONFIG_MEMORY_HOTPLUG
+static BLOCKING_NOTIFIER_HEAD(node_chain);
+
+int register_node_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&node_chain, nb);
+}
+EXPORT_SYMBOL(register_node_notifier);
+
+void unregister_node_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&node_chain, nb);
+}
+EXPORT_SYMBOL(unregister_node_notifier);
+
+int node_notify(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&node_chain, val, v);
+}
+#endif
+
static void node_remove_accesses(struct node *node)
{
struct node_access_nodes *c, *cnext;
@@ -214,10 +237,56 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
break;
}
}
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access == ACCESS_COORDINATE_CPU) {
+ if (mempolicy_set_node_perf(nid, coord)) {
+ pr_info("failed to set mempolicy attrs for node %d\n",
+ nid);
+ }
+ }
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
+ * node_update_perf_attrs - Update the performance values for given access class
+ * @nid: Node identifier to be updated
+ * @coord: Heterogeneous memory performance coordinates
+ * @access: The access class for the given attributes
+ */
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ struct node_access_nodes *access_node;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(access_node, &node->access_list, list_node) {
+ if (access_node->access != access)
+ continue;
+
+ access_node->coord = *coord;
+ for (i = 0; access_attrs[i]; i++) {
+ sysfs_notify(&access_node->dev.kobj,
+ NULL, access_attrs[i]->name);
+ }
+ break;
+ }
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access != ACCESS_COORDINATE_CPU)
+ return;
+
+ if (mempolicy_set_node_perf(nid, coord))
+ pr_info("failed to set mempolicy attrs for node %d\n", nid);
+}
+EXPORT_SYMBOL_GPL(node_update_perf_attrs);
+
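
A hedged usage sketch with illustrative values; the fields follow struct access_coordinate:

	struct access_coordinate coord = {
		.read_latency	 = 100,		/* ns, illustrative */
		.write_latency	 = 100,
		.read_bandwidth	 = 8192,	/* MB/s, illustrative */
		.write_bandwidth = 8192,
	};

	node_update_perf_attrs(nid, &coord, ACCESS_COORDINATE_CPU);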
+/**
* struct node_cache_info - Internal tracking for memory node caches
 * @dev: Device representing the cache level
* @node: List element for tracking in the node
@@ -244,12 +313,14 @@ CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")
+CACHE_ATTR(address_mode, "%#x")
static struct attribute *cache_attrs[] = {
&dev_attr_indexing.attr,
&dev_attr_size.attr,
&dev_attr_line_size.attr,
&dev_attr_write_policy.attr,
+ &dev_attr_address_mode.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache);
@@ -466,8 +537,8 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
- nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
- nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, 0UL,
+ nid, 0UL,
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
nid, K(sreclaimable + sunreclaimable),
@@ -578,7 +649,7 @@ static struct attribute *node_dev_attrs[] = {
NULL
};
-static struct bin_attribute *node_dev_bin_attrs[] = {
+static const struct bin_attribute *node_dev_bin_attrs[] = {
&bin_attr_cpumap,
&bin_attr_cpulist,
NULL
@@ -586,7 +657,7 @@ static struct bin_attribute *node_dev_bin_attrs[] = {
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
- .bin_attrs = node_dev_bin_attrs
+ .bin_attrs = node_dev_bin_attrs,
};
static const struct attribute_group *node_dev_groups[] = {
@@ -605,48 +676,6 @@ static void node_device_release(struct device *dev)
kfree(to_node(dev));
}
-/*
- * register_node - Setup a sysfs device for a node.
- * @num - Node number to use when creating the device.
- *
- * Initialize and register the node device.
- */
-static int register_node(struct node *node, int num)
-{
- int error;
-
- node->dev.id = num;
- node->dev.bus = &node_subsys;
- node->dev.release = node_device_release;
- node->dev.groups = node_dev_groups;
- error = device_register(&node->dev);
-
- if (error) {
- put_device(&node->dev);
- } else {
- hugetlb_register_node(node);
- compaction_register_node(node);
- }
-
- return error;
-}
-
-/**
- * unregister_node - unregister a node device
- * @node: node going away
- *
- * Unregisters a node device @node. All the devices on the node must be
- * unregistered before calling this function.
- */
-void unregister_node(struct node *node)
-{
- hugetlb_unregister_node(node);
- compaction_unregister_node(node);
- node_remove_accesses(node);
- node_remove_caches(node);
- device_unregister(&node->dev);
-}
-
struct node *node_devices[MAX_NUMNODES];
/*
@@ -745,23 +774,11 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-static int __ref get_nid_for_pfn(unsigned long pfn)
-{
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
- if (system_state < SYSTEM_RUNNING)
- return early_pfn_to_nid(pfn);
-#endif
- return pfn_to_nid(pfn);
-}
-
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk,
- enum meminit_context context)
+ struct memory_block *mem_blk)
{
int ret;
- memory_block_add_nid(mem_blk, nid, context);
-
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -780,46 +797,6 @@ static void do_register_memory_block_under_node(int nid,
ret);
}
-/* register memory section under specified node if it spans that node */
-static int register_mem_block_under_node_early(struct memory_block *mem_blk,
- void *arg)
-{
- unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
- unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
- int nid = *(int *)arg;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
- int page_nid;
-
- /*
- * memory block could have several absent sections from start.
- * skip pfn range from absent section
- */
- if (!pfn_in_present_section(pfn)) {
- pfn = round_down(pfn + PAGES_PER_SECTION,
- PAGES_PER_SECTION) - 1;
- continue;
- }
-
- /*
- * We need to check if page belongs to nid only at the boot
- * case because node's ranges can be interleaved.
- */
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
-
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
- return 0;
- }
- /* mem section does not span the specified node */
- return 0;
-}
-
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
@@ -829,7 +806,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
+ do_register_memory_block_under_node(nid, mem_blk);
return 0;
}
@@ -848,24 +825,51 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+/* register all memory blocks under the corresponding nodes */
+static void register_memory_blocks_under_nodes(void)
{
- walk_memory_blocks_func_t func;
+ struct memblock_region *r;
+
+ for_each_mem_region(r) {
+ const unsigned long start_block_id = phys_to_block_id(r->base);
+ const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1);
+ const int nid = memblock_get_region_node(r);
+ unsigned long block_id;
+
+ if (!node_online(nid))
+ continue;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ struct memory_block *mem;
+
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ memory_block_add_nid_early(mem, nid);
+ do_register_memory_block_under_node(nid, mem);
+ put_device(&mem->dev);
+ }
- if (context == MEMINIT_HOTPLUG)
- func = register_mem_block_under_node_hotplug;
- else
- func = register_mem_block_under_node_early;
+ }
+}
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
+{
walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
- (void *)&nid, func);
+ (void *)&nid, register_mem_block_under_node_hotplug);
return;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int __register_one_node(int nid)
+/**
+ * register_node - Initialize and register the node device.
+ * @nid: Node number to use when creating the device.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+int register_node(int nid)
{
int error;
int cpu;
@@ -876,9 +880,22 @@ int __register_one_node(int nid)
return -ENOMEM;
INIT_LIST_HEAD(&node->access_list);
- node_devices[nid] = node;
- error = register_node(node_devices[nid], nid);
+ node->dev.id = nid;
+ node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
+ node->dev.groups = node_dev_groups;
+
+ error = device_register(&node->dev);
+ if (error) {
+ put_device(&node->dev);
+ return error;
+ }
+
+ node_devices[nid] = node;
+ hugetlb_register_node(node);
+ compaction_register_node(node);
+ reclaim_register_node(node);
/* link cpu under this node */
for_each_present_cpu(cpu) {
@@ -890,13 +907,26 @@ int __register_one_node(int nid)
return error;
}
-
-void unregister_one_node(int nid)
+/**
+ * unregister_node - unregister a node device
+ * @nid: nid of the node going away
+ *
+ * Unregisters the node device at node id @nid. All the devices on the
+ * node must be unregistered before calling this function.
+ */
+void unregister_node(int nid)
{
- if (!node_devices[nid])
+ struct node *node = node_devices[nid];
+
+ if (!node)
return;
- unregister_node(node_devices[nid]);
+ hugetlb_unregister_node(node);
+ compaction_unregister_node(node);
+ reclaim_unregister_node(node);
+ node_remove_accesses(node);
+ node_remove_caches(node);
+ device_unregister(&node->dev);
node_devices[nid] = NULL;
}
@@ -969,11 +999,13 @@ void __init node_dev_init(void)
/*
* Create all node devices, which will properly link the node
- * to applicable memory block devices and already created cpu devices.
+ * to already created cpu devices.
*/
for_each_online_node(i) {
- ret = register_one_node(i);
+ ret = register_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
+
+ register_memory_blocks_under_nodes();
}
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 951819e71b4a..a5539e294d4d 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -7,19 +7,18 @@
#include <linux/acpi.h>
#include <linux/sysfs.h>
+#include <linux/string_choices.h>
#include "physical_location.h"
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
- acpi_status status;
if (!has_acpi_companion(dev))
return false;
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location =
@@ -118,7 +117,7 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
- dev->physical_location->dock ? "yes" : "no");
+ str_yes_no(dev->physical_location->dock));
}
static DEVICE_ATTR_RO(dock);
@@ -126,7 +125,7 @@ static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
- dev->physical_location->lid ? "yes" : "no");
+ str_yes_no(dev->physical_location->lid));
}
static DEVICE_ATTR_RO(lid);
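
str_yes_no() comes from <linux/string_choices.h>, alongside siblings such as str_on_off() and str_enabled_disabled(); for example:

	bool docked = dev->physical_location->dock;

	sysfs_emit(buf, "%s\n", str_yes_no(docked));	/* "yes" or "no" */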
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 11f5fdf65b9e..70db08f3ac6f 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -4,346 +4,12 @@
*
* Copyright (C) 2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
+ * Copyright (C) 2022 Linutronix GmbH
*/
#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
-#include <linux/slab.h>
-
-/* Begin of removal area. Once everything is converted over. Cleanup the includes too! */
-
-#define DEV_ID_SHIFT 21
-#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
-
-/*
- * Internal data structure containing a (made up, but unique) devid
- * and the callback to write the MSI message.
- */
-struct platform_msi_priv_data {
- struct device *dev;
- void *host_data;
- msi_alloc_info_t arg;
- irq_write_msi_msg_t write_msg;
- int devid;
-};
-
-/* The devid allocator */
-static DEFINE_IDA(platform_msi_devid_ida);
-
-#ifdef GENERIC_MSI_DOMAIN_OPS
-/*
- * Convert an msi_desc to a globaly unique identifier (per-device
- * devid + msi_desc position in the msi_list).
- */
-static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
-{
- u32 devid = desc->dev->msi.data->platform_data->devid;
-
- return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
-}
-
-static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
-{
- arg->desc = desc;
- arg->hwirq = platform_msi_calc_hwirq(desc);
-}
-
-static int platform_msi_init(struct irq_domain *domain,
- struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
-{
- return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- info->chip, info->chip_data);
-}
-
-static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
-{
- arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
-}
-#else
-#define platform_msi_set_desc NULL
-#define platform_msi_init NULL
-#define platform_msi_set_proxy_dev(x) do {} while(0)
-#endif
-
-static void platform_msi_update_dom_ops(struct msi_domain_info *info)
-{
- struct msi_domain_ops *ops = info->ops;
-
- BUG_ON(!ops);
-
- if (ops->msi_init == NULL)
- ops->msi_init = platform_msi_init;
- if (ops->set_desc == NULL)
- ops->set_desc = platform_msi_set_desc;
-}
-
-static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct msi_desc *desc = irq_data_get_msi_desc(data);
-
- desc->dev->msi.data->platform_data->write_msg(desc, msg);
-}
-
-static void platform_msi_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
-
- BUG_ON(!chip);
- if (!chip->irq_mask)
- chip->irq_mask = irq_chip_mask_parent;
- if (!chip->irq_unmask)
- chip->irq_unmask = irq_chip_unmask_parent;
- if (!chip->irq_eoi)
- chip->irq_eoi = irq_chip_eoi_parent;
- if (!chip->irq_set_affinity)
- chip->irq_set_affinity = msi_domain_set_affinity;
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = platform_msi_write_msg;
- if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
- !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-}
-
-/**
- * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a platform MSI
- * interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
-{
- struct irq_domain *domain;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- platform_msi_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- platform_msi_update_chip_ops(info);
- info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
-
- domain = msi_create_irq_domain(fwnode, info, parent);
- if (domain)
- irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
-
- return domain;
-}
-EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
-
-static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
-{
- struct platform_msi_priv_data *datap;
- int err;
-
- /*
- * Limit the number of interrupts to 2048 per device. Should we
- * need to bump this up, DEV_ID_SHIFT should be adjusted
- * accordingly (which would impact the max number of MSI
- * capable devices).
- */
- if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
- return -EINVAL;
-
- if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
- dev_err(dev, "Incompatible msi_domain, giving up\n");
- return -EINVAL;
- }
-
- err = msi_setup_device_data(dev);
- if (err)
- return err;
-
- /* Already initialized? */
- if (dev->msi.data->platform_data)
- return -EBUSY;
-
- datap = kzalloc(sizeof(*datap), GFP_KERNEL);
- if (!datap)
- return -ENOMEM;
-
- datap->devid = ida_alloc_max(&platform_msi_devid_ida,
- (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
- if (datap->devid < 0) {
- err = datap->devid;
- kfree(datap);
- return err;
- }
-
- datap->write_msg = write_msi_msg;
- datap->dev = dev;
- dev->msi.data->platform_data = datap;
- return 0;
-}
-
-static void platform_msi_free_priv_data(struct device *dev)
-{
- struct platform_msi_priv_data *data = dev->msi.data->platform_data;
-
- dev->msi.data->platform_data = NULL;
- ida_free(&platform_msi_devid_ida, data->devid);
- kfree(data);
-}
-
-/**
- * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
- * @dev: The device for which to allocate interrupts
- * @nvec: The number of interrupts to allocate
- * @write_msi_msg: Callback to write an interrupt message for @dev
- *
- * Returns:
- * Zero for success, or an error code in case of failure
- */
-static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
-{
- int err;
-
- err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (err)
- return err;
-
- err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
- if (err)
- platform_msi_free_priv_data(dev);
-
- return err;
-}
-
-/**
- * platform_msi_get_host_data - Query the private data associated with
- * a platform-msi domain
- * @domain: The platform-msi domain
- *
- * Return: The private data provided when calling
- * platform_msi_create_device_domain().
- */
-void *platform_msi_get_host_data(struct irq_domain *domain)
-{
- struct platform_msi_priv_data *data = domain->host_data;
-
- return data->host_data;
-}
-
-static struct lock_class_key platform_device_msi_lock_class;
-
-/**
- * __platform_msi_create_device_domain - Create a platform-msi device domain
- *
- * @dev: The device generating the MSIs
- * @nvec: The number of MSIs that need to be allocated
- * @is_tree: flag to indicate tree hierarchy
- * @write_msi_msg: Callback to write an interrupt message for @dev
- * @ops: The hierarchy domain operations to use
- * @host_data: Private data associated to this domain
- *
- * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
- *
- * This is for interrupt domains which stack on a platform-msi domain
- * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
- * that platform-msi domain which is the parent for the new domain.
- */
-struct irq_domain *
-__platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct platform_msi_priv_data *data;
- struct irq_domain *domain;
- int err;
-
- err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (err)
- return NULL;
-
- /*
- * Use a separate lock class for the MSI descriptor mutex on
- * platform MSI device domains because the descriptor mutex nests
- * into the domain mutex. See alloc/free below.
- */
- lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
-
- data = dev->msi.data->platform_data;
- data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
- is_tree ? 0 : nvec,
- dev->fwnode, ops, data);
- if (!domain)
- goto free_priv;
-
- platform_msi_set_proxy_dev(&data->arg);
- err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
- if (err)
- goto free_domain;
-
- return domain;
-
-free_domain:
- irq_domain_remove(domain);
-free_priv:
- platform_msi_free_priv_data(dev);
- return NULL;
-}
-
-/**
- * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
- * device domain
- *
- * @domain: The platform-msi device domain
- * @virq: The base irq from which to perform the free operation
- * @nr_irqs: How many interrupts to free from @virq
- */
-void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct platform_msi_priv_data *data = domain->host_data;
-
- msi_lock_descs(data->dev);
- msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
- irq_domain_free_irqs_common(domain, virq, nr_irqs);
- msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
- msi_unlock_descs(data->dev);
-}
-
-/**
- * platform_msi_device_domain_alloc - Allocate interrupts associated with
- * a platform-msi device domain
- *
- * @domain: The platform-msi device domain
- * @virq: The base irq from which to perform the allocate operation
- * @nr_irqs: How many interrupts to allocate from @virq
- *
- * Return 0 on success, or an error code on failure. Must be called
- * with irq_domain_mutex held (which can only be done as part of a
- * top-level interrupt allocation).
- */
-int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct platform_msi_priv_data *data = domain->host_data;
- struct device *dev = data->dev;
-
- return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
-}
-
-/* End of removal area */
-
-/* Real per device domain interfaces */
/*
* This indirection can go when platform_device_msi_init_and_alloc_irqs()
@@ -357,7 +23,7 @@ static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
cb(irq_data_get_msi_desc(d), msg);
}
-static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc)
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = desc->msi_index;
@@ -373,7 +39,7 @@ static const struct msi_domain_template platform_msi_template = {
},
.ops = {
- .set_desc = platform_msi_set_desc_byindex,
+ .set_desc = platform_msi_set_desc,
},
.info = {
@@ -408,10 +74,6 @@ int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nve
if (!domain || !write_msi_msg)
return -EINVAL;
- /* Migration support. Will go away once everything is converted */
- if (!irq_domain_is_msi_parent(domain))
- return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg);
-
/*
* @write_msi_msg is stored in the resulting msi_domain_info::data.
* The underlying domain creation mechanism will assign that
@@ -432,12 +94,7 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
*/
void platform_device_msi_free_irqs_all(struct device *dev)
{
- struct irq_domain *domain = dev->msi.domain;
-
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
-
- /* Migration support. Will go away once everything is converted */
- if (!irq_domain_is_msi_parent(domain))
- platform_msi_free_priv_data(dev);
+ msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
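
With the migration branches gone, the per-device domain interface is the only path left; the typical pairing, with a hypothetical write callback (sketch):

	static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
	{
		/* program the device doorbell from msg->address_hi/lo, msg->data */
	}

	ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, my_write_msg);
	if (ret)
		return ret;
	/* ... on teardown: */
	platform_device_msi_free_irqs_all(dev);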
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 10c577963418..b45d41b018ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -150,25 +150,37 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
+static const struct cpumask *get_irq_affinity(struct platform_device *dev,
+ unsigned int num)
+{
+ const struct cpumask *mask = NULL;
+#ifndef CONFIG_SPARC
+ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
+
+ if (is_of_node(fwnode))
+ mask = of_irq_get_affinity(to_of_node(fwnode), num);
+ else if (is_acpi_device_node(fwnode))
+ mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num);
+#endif
+
+ return mask ?: cpu_possible_mask;
+}
+
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
- *
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * For example::
+ * platform_get_irq_affinity - get an optional IRQ and its affinity for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
*
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. Optional affinity information is provided in the
+ * affinity pointer if available, and NULL otherwise.
*
- * Return: non-zero IRQ number on success, negative error number on failure.
+ * Return: non-zero interrupt number on success, negative error number on failure.
*/
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
+ const struct cpumask **affinity)
{
int ret;
#ifdef CONFIG_SPARC
@@ -236,8 +248,37 @@ out_not_found:
out:
if (WARN(!ret, "0 is an invalid IRQ number\n"))
return -EINVAL;
+
+ if (ret > 0 && affinity)
+ *affinity = get_irq_affinity(dev, num);
+
return ret;
}
+EXPORT_SYMBOL_GPL(platform_get_irq_affinity);
+
+/**
+ * platform_get_irq_optional - get an optional interrupt for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ *
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. This is the same as platform_get_irq(), except
+ * that it does not print an error message if an interrupt cannot be
+ * obtained.
+ *
+ * For example::
+ *
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: non-zero interrupt number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+ return platform_get_irq_affinity(dev, num, NULL);
+}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
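
Callers that care about per-CPU interrupt placement can use the new variant directly (sketch):

	const struct cpumask *affinity;
	int irq = platform_get_irq_affinity(pdev, 0, &affinity);

	if (irq < 0)
		return irq;
	/* affinity: the DT/ACPI-described mask, or cpu_possible_mask */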
/**
@@ -608,7 +649,7 @@ int platform_device_add_resources(struct platform_device *pdev,
struct resource *r = NULL;
if (res) {
- r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
+ r = kmemdup_array(res, num, sizeof(*r), GFP_KERNEL);
if (!r)
return -ENOMEM;
}
@@ -982,7 +1023,7 @@ struct platform_device * __init_or_module __platform_create_bundle(
struct platform_device *pdev;
int error;
- pdev = platform_device_alloc(driver->driver.name, -1);
+ pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE);
if (!pdev) {
error = -ENOMEM;
goto err_out;
@@ -1122,7 +1163,7 @@ static int platform_legacy_resume(struct device *dev)
int platform_pm_suspend(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1140,7 +1181,7 @@ int platform_pm_suspend(struct device *dev)
int platform_pm_resume(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1162,7 +1203,7 @@ int platform_pm_resume(struct device *dev)
int platform_pm_freeze(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1180,7 +1221,7 @@ int platform_pm_freeze(struct device *dev)
int platform_pm_thaw(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1198,7 +1239,7 @@ int platform_pm_thaw(struct device *dev)
int platform_pm_poweroff(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1216,7 +1257,7 @@ int platform_pm_poweroff(struct device *dev)
int platform_pm_restore(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1332,7 +1373,7 @@ __ATTRIBUTE_GROUPS(platform_dev);
* and compare it against the name of the driver. Return whether they match
* or not.
*/
-static int platform_match(struct device *dev, struct device_driver *drv)
+static int platform_match(struct device *dev, const struct device_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
@@ -1396,15 +1437,13 @@ static int platform_probe(struct device *_dev)
if (ret < 0)
return ret;
- ret = dev_pm_domain_attach(_dev, true);
+ ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
goto out;
- if (drv->probe) {
+ if (drv->probe)
ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
- }
out:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -1420,15 +1459,8 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- if (drv->remove_new) {
- drv->remove_new(dev);
- } else if (drv->remove) {
- int ret = drv->remove(dev);
-
- if (ret)
- dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
- }
- dev_pm_domain_detach(_dev, true);
+ if (drv->remove)
+ drv->remove(dev);
}
static void platform_shutdown(struct device *_dev)
@@ -1446,7 +1478,7 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
- struct platform_driver *drv = to_platform_driver(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
@@ -1457,7 +1489,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
- if (ret || drv->driver_managed_dma)
+ /* @dev->driver may not be valid when we're called from the IOMMU layer */
+ if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);
@@ -1480,7 +1513,7 @@ static const struct dev_pm_ops platform_dev_pm_ops = {
USE_PLATFORM_PM_SLEEP_OPS
};
-struct bus_type platform_bus_type = {
+const struct bus_type platform_bus_type = {
.name = "platform",
.dev_groups = platform_dev_groups,
.match = platform_match,
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 01f11629d241..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e18ba676cdf6..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
- struct clk *clk;
- int ret;
-
- if (!dev || !dev->of_node || !name)
- return -EINVAL;
-
- clk = of_clk_get_by_name(dev->of_node, name);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
@@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- pm_clk_list_lock(psd);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- pm_clk_list_unlock(psd);
- return;
-
- remove:
- list_del(&ce->node);
- if (ce->enabled_when_prepared)
- psd->clock_op_might_sleep--;
- pm_clk_list_unlock(psd);
-
- __pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 327d168dd37a..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,7 @@
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include "power.h"
@@ -82,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
/**
* dev_pm_domain_attach - Attach a device to its PM domain.
* @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: indicate whether we should power on/off the device on attach/detach
*
* The @dev may only be attached to a single PM domain. By iterating through
* the available alternatives we try to find a valid PM domain for the device.
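+ *
+ * A minimal sketch of a typical bus ->probe() path (illustrative only)::
+ *
+ *	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ *				   PD_FLAG_DETACH_POWER_OFF);
+ *	if (ret)
+ *		return ret;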
@@ -99,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
* Returns 0 on successfully attached PM domain, or when it is found that the
* device doesn't need a PM domain, else a negative error code.
*/
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
{
int ret;
if (dev->pm_domain)
return 0;
- ret = acpi_dev_pm_attach(dev, power_on);
+ ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
if (!ret)
ret = genpd_dev_pm_attach(dev);
+ if (dev->pm_domain)
+ dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
@@ -195,6 +199,7 @@ int dev_pm_domain_attach_list(struct device *dev,
struct device *pd_dev = NULL;
int ret, i, num_pds = 0;
bool by_id = true;
+ size_t size;
u32 pd_flags = data ? data->pd_flags : 0;
u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
@@ -217,19 +222,19 @@ int dev_pm_domain_attach_list(struct device *dev,
if (num_pds <= 0)
return 0;
- pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL);
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
if (!pds)
return -ENOMEM;
- pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs),
- GFP_KERNEL);
- if (!pds->pd_devs)
- return -ENOMEM;
-
- pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links),
- GFP_KERNEL);
- if (!pds->pd_links)
- return -ENOMEM;
+ size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links) +
+ sizeof(*pds->opp_tokens);
+ pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+ if (!pds->pd_devs) {
+ ret = -ENOMEM;
+ goto free_pds;
+ }
+ pds->pd_links = (void *)(pds->pd_devs + num_pds);
+ pds->opp_tokens = (void *)(pds->pd_links + num_pds);
if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
link_flags |= DL_FLAG_RPM_ACTIVE;
@@ -245,6 +250,19 @@ int dev_pm_domain_attach_list(struct device *dev,
goto err_attach;
}
+ if (pd_flags & PD_FLAG_REQUIRED_OPP) {
+ struct dev_pm_opp_config config = {
+ .required_dev = pd_dev,
+ .required_dev_index = i,
+ };
+
+ ret = dev_pm_opp_set_config(dev, &config);
+ if (ret < 0)
+ goto err_link;
+
+ pds->opp_tokens[i] = ret;
+ }
+
if (link_flags) {
struct device_link *link;
@@ -265,18 +283,68 @@ int dev_pm_domain_attach_list(struct device *dev,
return num_pds;
err_link:
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
dev_pm_domain_detach(pd_dev, true);
err_attach:
while (--i >= 0) {
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
if (pds->pd_links[i])
device_link_del(pds->pd_links[i]);
dev_pm_domain_detach(pds->pd_devs[i], true);
}
+ kfree(pds->pd_devs);
+free_pds:
+ kfree(pds);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
/**
+ * devm_pm_domain_detach_list - devres-enabled version of dev_pm_domain_detach_list.
+ * @_list: The list of PM domains to detach.
+ *
+ * This function reverses the actions of devm_pm_domain_attach_list().
+ * It will be invoked implicitly during the remove phase if the driver
+ * used devm_pm_domain_attach_list() to attach the PM domains.
+ */
+static void devm_pm_domain_detach_list(void *_list)
+{
+ struct dev_pm_domain_list *list = _list;
+
+ dev_pm_domain_detach_list(list);
+}
+
+/**
+ * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list
+ * @dev: The device to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * NOTE: this will also handle calling devm_pm_domain_detach_list() for
+ * you during the remove phase.
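+ *
+ * A minimal usage sketch (the domain names and error handling are
+ * illustrative; assumes the pd_names/num_pd_names members of
+ * struct dev_pm_domain_attach_data)::
+ *
+ *	static const char * const pd_names[] = { "perf", "mem" };
+ *	struct dev_pm_domain_attach_data pd_data = {
+ *		.pd_names = pd_names,
+ *		.num_pd_names = ARRAY_SIZE(pd_names),
+ *	};
+ *	struct dev_pm_domain_list *pd_list;
+ *	int ret;
+ *
+ *	ret = devm_pm_domain_attach_list(dev, &pd_data, &pd_list);
+ *	if (ret < 0)
+ *		return ret;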
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure.
+ */
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ int ret, num_pds;
+
+ num_pds = dev_pm_domain_attach_list(dev, data, list);
+ if (num_pds <= 0)
+ return num_pds;
+
+ ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list);
+ if (ret)
+ return ret;
+
+ return num_pds;
+}
+EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
@@ -314,10 +382,14 @@ void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
return;
for (i = 0; i < list->num_pds; i++) {
+ dev_pm_opp_clear_config(list->opp_tokens[i]);
if (list->pd_links[i])
device_link_del(list->pd_links[i]);
dev_pm_domain_detach(list->pd_devs[i], true);
}
+
+ kfree(list->pd_devs);
+ kfree(list);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4fa525668cb7..af99bbcf281c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,13 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
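+/*
+ * Invoke the dev_pm_ops callback @op of @dev's driver, or return 0 if the
+ * device has no driver or the driver does not provide @op.
+ */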
+#define CALL_PM_OP(dev, op) \
+({ \
+ struct device *_dev = (dev); \
+ const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+ pm && pm->op ? pm->op(_dev) : 0; \
+})
+
#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -19,12 +26,7 @@
*/
int pm_generic_runtime_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*/
int pm_generic_runtime_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_resume);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
*/
int pm_generic_suspend_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+ return CALL_PM_OP(dev, suspend_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+ return CALL_PM_OP(dev, suspend_late);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
*/
int pm_generic_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend ? pm->suspend(dev) : 0;
+ return CALL_PM_OP(dev, suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -108,33 +99,17 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+ return CALL_PM_OP(dev, freeze_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze ? pm->freeze(dev) : 0;
+ return CALL_PM_OP(dev, freeze);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -144,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
@@ -156,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
*/
int pm_generic_poweroff_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_late);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
@@ -168,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
*/
int pm_generic_poweroff(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+ return CALL_PM_OP(dev, poweroff);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -180,33 +149,17 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+ return CALL_PM_OP(dev, thaw_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw ? pm->thaw(dev) : 0;
+ return CALL_PM_OP(dev, thaw);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -216,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+ return CALL_PM_OP(dev, resume_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -228,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+ return CALL_PM_OP(dev, resume_early);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
@@ -240,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
*/
int pm_generic_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume ? pm->resume(dev) : 0;
+ return CALL_PM_OP(dev, resume);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -252,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+ return CALL_PM_OP(dev, restore_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -264,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+ return CALL_PM_OP(dev, restore_early);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
@@ -276,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
*/
int pm_generic_restore(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore ? pm->restore(dev) : 0;
+ return CALL_PM_OP(dev, restore);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..97a8b4fcf471 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,16 +34,13 @@
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
+#include <linux/nmi.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -63,8 +60,23 @@ static LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - check if recovering from hibernation due to an error
+ *
+ * Used to query whether dev_pm_ops.thaw() is being called for the normal
+ * hibernation case or while recovering from an error.
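+ *
+ * A minimal sketch of a driver's .thaw() callback using this helper
+ * (my_thaw() and my_full_reinit() are hypothetical)::
+ *
+ *	static int my_thaw(struct device *dev)
+ *	{
+ *		if (pm_hibernate_is_recovering())
+ *			return my_full_reinit(dev);
+ *
+ *		return 0;
+ *	}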
+ *
+ * Return: true for error case, false for normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
static const char *pm_verb(int event)
{
switch (event) {
@@ -84,6 +96,8 @@ static const char *pm_verb(int event)
return "restore";
case PM_EVENT_RECOVER:
return "recover";
+ case PM_EVENT_POWEROFF:
+ return "poweroff";
default:
return "(unknown PM event)";
}
@@ -249,7 +263,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -266,8 +280,9 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
@@ -323,8 +338,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -354,6 +370,7 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
@@ -388,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
@@ -422,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
@@ -496,11 +515,17 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+ "Backtrace all CPUs on DPM watchdog timeout");
+
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
* @t: The timer that PM watchdog depends on.
@@ -511,12 +536,28 @@ struct dpm_watchdog {
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ unsigned int this_cpu = smp_processor_id();
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ if (dpm_watchdog_all_cpu_backtrace)
+ trigger_allbutcpu_cpu_backtrace(this_cpu);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
+
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -530,10 +571,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@@ -545,8 +587,8 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
- del_timer_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_delete_sync(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -583,29 +625,96 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
+static bool __dpm_async(struct device *dev, async_func_t func)
+{
+ if (dev->power.work_in_progress)
+ return true;
+
+ if (!is_async(dev))
+ return false;
+
+ dev->power.work_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+
+ return false;
+}
+
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- reinit_completion(&dev->power.completion);
+ guard(mutex)(&async_wip_mtx);
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
+ return __dpm_async(dev, func);
+}
- get_device(dev);
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
- if (async_schedule_dev_nocall(func, dev))
- return true;
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
- put_device(dev);
- }
+ return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
/*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
+ * Prevent racing with dpm_clear_async_state() during initial list
+ * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+ * dpm_resume().
*/
- dev->power.async_in_progress = false;
- return false;
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ */
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
+}
+
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ dpm_async_resume_children(dev, func);
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" consumers. */
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->consumer, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
+static bool dpm_root_device(struct device *dev)
+{
+ lockdep_assert_held(&dpm_list_mtx);
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * suppliers is actually empty before calling it.
+ */
+ return !dev->parent && list_empty(&dev->links.suppliers);
}
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -628,8 +737,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend either in
+ * case the early resume of it depends on the noirq resume that
+ * has not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -642,12 +763,12 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.smart_suspend flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
if (skip_resume)
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ else if (dev_pm_smart_suspend(dev))
pm_runtime_set_active(dev);
if (dev->pm_domain) {
@@ -685,10 +806,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
+
+ dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -712,17 +835,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
- dpm_async_fn(dev, async_resume_noirq);
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
+ }
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.async_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -737,7 +863,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
@@ -758,6 +884,8 @@ void dpm_resume_noirq(pm_message_t state)
device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -775,12 +903,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
+ if (dev->power.syscore)
+ goto Skip;
+
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -813,18 +944,20 @@ Run:
Skip:
dev->power.is_late_suspended = false;
+ pm_runtime_enable(dev);
Out:
TRACE_RESUME(error);
- pm_runtime_enable(dev);
complete_all(&dev->power.completion);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
+
+ dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -852,17 +985,20 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
- dpm_async_fn(dev, async_resume_early);
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
+ }
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.async_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_early)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -877,7 +1013,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
@@ -894,6 +1030,8 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
@@ -913,8 +1051,20 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
+ dev->power.is_suspended = false;
+
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
@@ -931,9 +1081,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -971,9 +1118,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
End:
error = dpm_run_callback(callback, dev, state, info);
- dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -983,10 +1128,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
+
+ dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1010,7 +1157,6 @@ void dpm_resume(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
- might_sleep();
pm_transition = state;
async_error = 0;
@@ -1018,17 +1164,20 @@ void dpm_resume(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
- dpm_async_fn(dev, async_resume);
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
+ }
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.async_in_progress) {
+ if (!dpm_async_fn(dev, async_resume)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1043,7 +1192,7 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
@@ -1093,6 +1242,8 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1108,7 +1259,6 @@ void dpm_complete(pm_message_t state)
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
- might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -1147,6 +1297,7 @@ void dpm_complete(pm_message_t state)
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
+ pm_restore_gfp_mask();
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1154,6 +1305,82 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return list_empty(&dev->links.consumers);
+}
+
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return false;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+
+ return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ if (!dpm_async_suspend_parent(dev, func))
+ return;
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" suppliers. */
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->supplier, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+ struct device *dev;
+
+ guard(mutex)(&async_wip_mtx);
+
+ list_for_each_entry_reverse(dev, device_list, power.entry) {
+ /*
+ * In case the device is being waited for and async processing
+ * has not started for it yet, let the waiters make progress.
+ */
+ if (!dev->power.work_in_progress)
+ complete_all(&dev->power.completion);
+ }
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1185,12 +1412,14 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1200,7 +1429,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1211,7 +1440,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
@@ -1244,7 +1473,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
@@ -1254,14 +1483,13 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
- * Skipping the resume of devices that were in use right before the
- * system suspend (as indicated by their PM-runtime usage counters)
- * would be suboptimal. Also resume them if doing that is not allowed
- * to be skipped.
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
*/
- if (atomic_read(&dev->power.usage_count) > 1 ||
- !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
- dev->power.may_skip_resume))
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
if (dev->power.must_resume)
@@ -1270,7 +1498,11 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || READ_ONCE(async_error))
+ return;
+
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1284,7 +1516,8 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1293,8 +1526,18 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
@@ -1305,22 +1548,28 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state, false);
+ device_suspend_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_late_early_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
@@ -1365,6 +1614,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1373,7 +1624,7 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1382,21 +1633,28 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- __pm_runtime_disable(dev, false);
-
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Complete;
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
+ __pm_runtime_disable(dev, false);
+
+ if (dev->power.syscore)
+ goto Skip;
+
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1424,9 +1682,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
+ pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1437,7 +1696,11 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
+
+ if (error || READ_ONCE(async_error))
+ return;
+
+ dpm_async_suspend_superior(dev, async_suspend_late);
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1455,7 +1718,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1466,8 +1730,18 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
@@ -1478,22 +1752,28 @@ int dpm_suspend_late(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state, false);
+ device_suspend_late(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_suspended_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error) {
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
@@ -1565,7 +1845,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -1574,13 +1854,15 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1592,7 +1874,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error) {
+ if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
@@ -1612,7 +1894,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
@@ -1628,6 +1910,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
}
@@ -1695,14 +1978,18 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || READ_ONCE(async_error))
+ return;
+
+ dpm_async_suspend_superior(dev, async_suspend);
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1720,7 +2007,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
@@ -1733,8 +2021,18 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
@@ -1745,22 +2043,28 @@ int dpm_suspend(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state, false);
+ device_suspend(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_prepared_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
@@ -1769,6 +2073,46 @@ int dpm_suspend(pm_message_t state)
return error;
}
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ dev_for_each_link_to_supplier(link, dev) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
+
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
@@ -1780,6 +2124,7 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
/*
@@ -1789,6 +2134,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;
@@ -1796,6 +2148,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
dev->power.wakeup_path = false;
+ dev->power.out_band_wakeup = false;
if (dev->power.no_pm_callbacks)
goto unlock;
@@ -1823,6 +2176,13 @@ unlock:
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1830,11 +2190,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
@@ -1849,7 +2210,6 @@ int dpm_prepare(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
- might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
@@ -1916,8 +2276,10 @@ int dpm_suspend_start(pm_message_t state)
error = dpm_prepare(state);
if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- else
+ else {
+ pm_restrict_gfp_mask();
error = dpm_suspend(state);
+ }
dpm_show_time(starttime, state, error, "start");
return error;
@@ -1998,6 +2360,5 @@ void device_pm_check_callbacks(struct device *dev)
bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index bd77f6734f14..ff393cba7649 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
/**
* apply_constraint - Add/modify/remove device PM QoS request.
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..1535ad2b0264
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
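+ *
+ * KUnit tests for the runtime PM core, exercising usage-count handling,
+ * idle/suspend/resume return values, and the disabled and error states.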
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* Grab 2 refcounts */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_get_noresume(dev);
+ /* The first put() sees usage_count 1 */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+ /* The second put() sees usage_count 0 but tells us "already suspended". */
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ /* Should have remained suspended the whole time. */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ pm_runtime_put_noidle(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* Never called pm_runtime_enable() */
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+ /* "disabled" is treated as "active" */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+ /* Still disabled */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ /* Fake a .runtime_resume() error */
+ dev->power.runtime_error = -EIO;
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+ /* Error is still pending */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+ /* Clear error */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+ /* Still suspended */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+ pm_runtime_barrier(dev);
+ pm_runtime_put(dev);
+ pm_runtime_suspend(dev); /* flush the put(), to suspend */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+ /*
+ * The following should all return -EAGAIN (usage is non-zero) or 1
+ * (already resumed).
+ */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ /* Suspended again */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Nothing to flush. We stay active. */
+ pm_runtime_barrier(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Ask for idle? Now we suspend. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+ KUNIT_CASE(pm_runtime_depth_test),
+ KUNIT_CASE(pm_runtime_already_suspended_test),
+ KUNIT_CASE(pm_runtime_idle_test),
+ KUNIT_CASE(pm_runtime_disabled_test),
+ KUNIT_CASE(pm_runtime_error_test),
+ KUNIT_CASE(pm_runtime_probe_active_test),
+ {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+ .name = "pm_runtime_test_cases",
+ .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@
typedef int (*pm_callback_t)(struct device *);
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+ return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+ size_t cb_offset)
+{
+ if (dev->driver && dev->driver->pm)
+ return get_callback_ptr(dev->driver->pm, cb_offset);
+
+ return NULL;
+}
+
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
- pm_callback_t cb;
const struct dev_pm_ops *ops;
+ pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
ops = NULL;
if (ops)
- cb = *(pm_callback_t *)((void *)ops + cb_offset);
- else
- cb = NULL;
+ cb = get_callback_ptr(ops, cb_offset);
- if (!cb && dev->driver && dev->driver->pm)
- cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+ if (!cb)
+ cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
@@ -78,7 +90,7 @@ static void update_pm_runtime_accounting(struct device *dev)
/*
* Because ktime_get_mono_fast_ns() is not monotonic during
* timekeeping updates, ensure that 'now' is after the last saved
- * timesptamp.
+ * timestamp.
*/
if (now < last)
return;
@@ -205,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
* resume/suspend callback of any one of its ancestors(or the
* block device itself), the deadlock may be triggered inside the
* memory allocation since it might not complete until the block
- * device becomes active and the involed page I/O finishes. The
+ * device becomes active and the involved page I/O finishes. The
* situation is pointed out first by Alan Stern. Network device
* are involved in iSCSI kind of situation.
*
@@ -290,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -448,8 +460,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
retval = __rpm_callback(cb, dev);
}
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
}
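
A hedged sketch of what the error-handling change above enables on the driver side: returning -EBUSY (or -EAGAIN) from a runtime PM callback now vetoes the transition without latching power.runtime_error. All foo_* names below are hypothetical, not from this patch.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct foo_priv {
    	bool transfer_in_flight;
    };

    static int foo_runtime_suspend(struct device *dev)
    {
    	struct foo_priv *priv = dev_get_drvdata(dev);

    	/*
    	 * Transient veto: with the rpm_callback() change above, -EBUSY
    	 * is not stored in power.runtime_error, so a later suspend
    	 * attempt is not blocked by a stale error.
    	 */
    	if (priv->transfer_in_flight)
    		return -EBUSY;

    	/* ... put the hardware into a low-power state ... */
    	return 0;
    }
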
/**
@@ -475,6 +498,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -725,21 +751,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * On transient errors, if the callback routine failed an autosuspend,
+ * and if the last_busy time has been updated so that there is a new
+ * autosuspend expiration time, automatically reschedule another
+ * autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
@@ -776,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
else
retval = -EACCES;
}
@@ -1003,7 +1028,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -1183,10 +1208,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
@@ -1208,7 +1235,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
- } else if (ign_usage_count) {
+ } else if (ign_usage_count || (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
@@ -1241,10 +1269,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
* @dev: Target device.
*
* Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
- * it returns 1. If the device is in a different state or its usage_count is 0,
- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
- * in which case also the usage_count will remain unmodified.
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
+ * ignoring children and its active child count is nonzero. 1 is returned in
+ * this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0, or it is ignoring children, or its active child count is 0),
+ * 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * also the usage counter of @dev is not updated.
*/
int pm_runtime_get_if_in_use(struct device *dev)
{
@@ -1433,47 +1467,48 @@ static void __pm_runtime_barrier(struct device *dev)
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
*/
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
{
- int retval = 0;
-
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ && dev->power.request == RPM_REQ_RESUME)
rpm_resume(dev, 0);
- retval = 1;
- }
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
-
- return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
+
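
The callers of this pair are not shown in this hunk. As a hedged sketch, assuming the PM core is meant to bracket a system-wide transition with these helpers (the foo_dpm_* names are hypothetical), the flow would look like:

    static int foo_dpm_prepare(struct device *dev)
    {
    	/* Latch RPM_BLOCKED if runtime PM is disabled right now. */
    	if (pm_runtime_block_if_disabled(dev))
    		dev_dbg(dev, "runtime PM is disabled and now blocked\n");

    	return 0;
    }

    static void foo_dpm_complete(struct device *dev)
    {
    	/* Drop RPM_BLOCKED so pm_runtime_enable() stops warning. */
    	pm_runtime_unblock(dev);
    }
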
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1532,6 +1567,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
@@ -1545,6 +1584,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
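
A sketch of the intended probe() usage (foo_probe() and foo_power_up() are hypothetical stand-ins): the helper marks the device active, registers a devres action that restores the suspended status on unbind, and enables runtime PM, all undone automatically in reverse order.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int foo_power_up(struct device *dev)
    {
    	/* stand-in for clock/regulator bring-up */
    	return 0;
    }

    static int foo_probe(struct device *dev)
    {
    	int err;

    	err = foo_power_up(dev);	/* hardware is now active */
    	if (err)
    		return err;

    	return devm_pm_runtime_set_active_enabled(dev);
    }
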
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1567,13 +1632,34 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
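
A short hedged sketch (bar_probe() is hypothetical): a driver that must keep its device's usage count elevated for the whole binding can now rely on devres to issue the matching pm_runtime_put_noidle() on unbind.

    #include <linux/pm_runtime.h>

    static int bar_probe(struct device *dev)
    {
    	/* put_noidle() is issued automatically on driver detach */
    	return devm_pm_runtime_get_noresume(dev);
    }
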
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_forbid(struct device *dev)
{
@@ -1594,7 +1680,13 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_allow(struct device *dev)
{
@@ -1760,12 +1852,12 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.needs_force_resume = 0;
+ dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1787,6 +1879,11 @@ void pm_runtime_reinit(struct device *dev)
pm_runtime_put(dev->parent);
}
}
+ /*
+ * Clear power.needs_force_resume in case it has been set by
+ * pm_runtime_force_suspend() invoked from a driver remove callback.
+ */
+ dev->power.needs_force_resume = false;
}
/**
@@ -1810,9 +1907,8 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- if (link->flags & DL_FLAG_PM_RUNTIME) {
+ dev_for_each_link_to_supplier(link, dev)
+ if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
@@ -1866,7 +1962,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
*/
void pm_runtime_drop_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
@@ -1874,13 +1970,23 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
- return atomic_read(&dev->power.usage_count) <= 1 &&
- (atomic_read(&dev->power.child_count) == 0 ||
- dev->power.ignore_children);
+ /*
+ * Setting power.strict_midlayer means that the middle layer
+ * code does not want its runtime PM callbacks to be invoked via
+ * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+ * return a direct pointer to the driver callback in that case.
+ */
+ if (dev_pm_strict_midlayer_is_set(dev))
+ return __rpm_get_driver_callback(dev, cb_offset);
+
+ return __rpm_get_callback(dev, cb_offset);
}
+#define GET_CALLBACK(dev, callback) \
+ get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
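
GET_CALLBACK() relies on the same offsetof() indexing as __rpm_get_callback() above: the struct dev_pm_ops table is read as raw memory at the byte offset of the requested member. A standalone sketch of the pointer arithmetic (pick_runtime_resume() is illustrative only and equivalent to reading ops->runtime_resume directly):

    #include <linux/pm.h>
    #include <linux/stddef.h>

    typedef int (*pm_callback_t)(struct device *);

    static pm_callback_t pick_runtime_resume(struct dev_pm_ops *ops)
    {
    	size_t off = offsetof(struct dev_pm_ops, runtime_resume);

    	/* Load the function pointer stored 'off' bytes into the table. */
    	return *(pm_callback_t *)((void *)ops + off);
    }
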
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
@@ -1897,10 +2003,6 @@ static bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
@@ -1908,10 +2010,10 @@ int pm_runtime_force_suspend(struct device *dev)
int ret;
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
- callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ callback = GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
@@ -1923,15 +2025,16 @@ int pm_runtime_force_suspend(struct device *dev)
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
- * its parent, but set its status to RPM_SUSPENDED anyway in case this
- * function will be called again for it in the meantime.
+ * its parent and the usage counters of its suppliers. Otherwise, set
+ * power.needs_force_resume to let pm_runtime_force_resume() know that
+ * the device needs to be taken care of and to prevent this function
+ * from handling the device again in case the device is passed to it
+ * once more subsequently.
*/
- if (pm_runtime_need_not_resume(dev)) {
+ if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
- } else {
- __update_runtime_status(dev, RPM_SUSPENDED);
- dev->power.needs_force_resume = 1;
- }
+ else
+ dev->power.needs_force_resume = true;
return 0;
@@ -1942,33 +2045,37 @@ err:
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+#ifdef CONFIG_PM_SLEEP
+
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
+ *
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
*
- * Typically this function may be invoked from a system resume callback.
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+ pm_runtime_status_suspended(dev)))
goto out;
- /*
- * The value of the parent's children counter is correct already, so
- * just update the status of the device.
- */
- __update_runtime_status(dev, RPM_ACTIVE);
-
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ callback = GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
@@ -1979,9 +2086,30 @@ int pm_runtime_force_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
+
out:
- dev->power.needs_force_resume = 0;
+ /*
+ * The smart_suspend flag can be cleared here because it is not going
+ * to be necessary until the next system-wide suspend transition that
+ * will update it again.
+ */
+ dev->power.smart_suspend = false;
+ /*
+ * Also clear needs_force_resume to make this function skip devices that
+ * have been seen by it once.
+ */
+ dev->power.needs_force_resume = false;
+
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a1474fb67db9..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -509,14 +508,6 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return sysfs_emit(buf, "%lld\n", msec);
}
-static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
- kgid_t kgid)
-{
- if (dev->power.wakeup && dev->power.wakeup->dev)
- return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
- return 0;
-}
-
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -541,6 +532,15 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
&dev_attr_async.attr,
#endif
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
};
static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
-#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index cd6e559648b2..d8da7195bb00 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
- int len = snprintf(buf, size, "%s\n",
+ int len = scnprintf(buf, size, "%s\n",
dev_driver_string(dev));
- if (len > size)
- len = size;
buf += len;
ret += len;
size -= len;
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5a5a9e978e85..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, like dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
+
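
A minimal probe() sketch using the new devres variant (qux_probe() is hypothetical; the wakeup source is assumed to be set up with device_init_wakeup() first, as for dev_pm_set_wake_irq()):

    #include <linux/platform_device.h>
    #include <linux/pm_wakeirq.h>
    #include <linux/pm_wakeup.h>

    static int qux_probe(struct platform_device *pdev)
    {
    	int irq = platform_get_irq(pdev, 0);

    	if (irq < 0)
    		return irq;

    	device_init_wakeup(&pdev->dev, true);

    	/* The wake IRQ is cleared automatically on driver detach. */
    	return devm_pm_set_wake_irq(&pdev->dev, irq);
    }
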
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 752b417e8129..1e1a0e7eeac5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -77,7 +77,7 @@ static DEFINE_IDA(wakeup_ida);
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
-struct wakeup_source *wakeup_source_create(const char *name)
+static struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
const char *ws_name;
@@ -106,7 +106,6 @@ err_name:
err_ws:
return NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
@@ -149,7 +148,7 @@ static void wakeup_source_free(struct wakeup_source *ws)
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
-void wakeup_source_destroy(struct wakeup_source *ws)
+static void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
@@ -158,13 +157,12 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_record(ws);
wakeup_source_free(ws);
}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
-void wakeup_source_add(struct wakeup_source *ws)
+static void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
@@ -179,32 +177,29 @@ void wakeup_source_add(struct wakeup_source *ws)
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
-void wakeup_source_remove(struct wakeup_source *ws)
+static void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
if (WARN_ON(!ws))
return;
+ /*
+ * After shutting down the timer, wakeup_source_activate() will warn if
+ * the given wakeup source is passed to it.
+ */
+ timer_shutdown_sync(&ws->timer);
+
raw_spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
-
- del_timer_sync(&ws->timer);
- /*
- * Clear timer.function to make wakeup_source_not_registered() treat
- * this wakeup source as not registered.
- */
- ws->timer.function = NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
@@ -337,7 +332,7 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
- if (pm_suspend_target_state != PM_SUSPEND_ON)
+ if (pm_sleep_transition_in_progress())
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
ws = wakeup_source_register(dev, dev_name(dev));
@@ -510,14 +505,14 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
* @ws: Wakeup source to be validated.
*/
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
{
/*
- * Use timer struct to check if the given source is initialized
- * by wakeup_source_add.
+ * Use the timer struct to check if the given wakeup source has been
+ * initialized by wakeup_source_add() and it is not going away.
*/
return ws->timer.function != pm_wakeup_timer_fn;
}
@@ -562,8 +557,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
- if (WARN_ONCE(wakeup_source_not_registered(ws),
- "unregistered wakeup source\n"))
+ if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
return;
ws->active = true;
@@ -613,7 +607,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
wakeup_source_report_event(ws, false);
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
spin_unlock_irqrestore(&ws->lock, flags);
@@ -693,7 +687,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
ws->max_time = duration;
ws->last_time = now;
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
if (ws->autosleep_enabled)
@@ -763,7 +757,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
*/
static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = from_timer(ws, t, timer);
+ struct wakeup_source *ws = timer_container_of(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
wakeup_attr(event_count);
wakeup_attr(wakeup_count);
wakeup_attr(expire_count);
+wakeup_attr(relax_count);
static ssize_t active_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
&dev_attr_event_count.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_expire_count.attr,
+ &dev_attr_relax_count.attr,
&dev_attr_active_time_ms.attr,
&dev_attr_total_time_ms.attr,
&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 7324a704a9a1..6a63860579dd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Return whether property @propname is true or false in the device firmware
+ * description.
+ *
+ * Return: %true if property @propname is present, %false otherwise.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Return whether property @propname is true or false in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
+
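
A brief usage sketch (quux_parse() is hypothetical; "wakeup-source" is a common DT boolean property): unlike a plain presence check, this path goes through the property_read_bool fwnode op, falling back to the secondary fwnode as in the helper above.

    #include <linux/device.h>
    #include <linux/property.h>

    static void quux_parse(struct device *dev)
    {
    	bool can_wake = device_property_read_bool(dev, "wakeup-source");

    	if (can_wake)
    		dev_dbg(dev, "device can wake the system\n");
    }
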
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
@@ -540,7 +578,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
- * otherwise @nargs is ignored. Only relevant on OF.
+ * otherwise @nargs is ignored.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
@@ -869,20 +907,6 @@ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_handle_get);
/**
- * fwnode_handle_put - Drop reference to a device node
- * @fwnode: Pointer to the device node to drop the reference to.
- *
- * This has to be used when terminating device_for_each_child_node() iteration
- * with break or return to prevent stale device node references from being left
- * behind.
- */
-void fwnode_handle_put(struct fwnode_handle *fwnode)
-{
- fwnode_call_void_op(fwnode, put);
-}
-EXPORT_SYMBOL_GPL(fwnode_handle_put);
-
-/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*
@@ -904,22 +928,49 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to cound the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
*
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
+ */
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ fwnode_for_each_child_node(fwnode, child)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
+
+/**
+ * fwnode_get_named_child_node_count - number of child nodes with given name
+ * @fwnode: Node whose child nodes are counted.
+ * @name: String to match child node name against.
+ *
+ * Scan child nodes and count all the nodes with a specific name. A potential
+ * '@number' unit-address suffix in scanned names is ignored.
+ * E.g.::
+ *   fwnode_get_named_child_node_count(fwnode, "channel");
+ * would match all the nodes::
+ *   channel { }, channel@0 {}, channel@0xabba {}...
+ *
+ * Return: the number of child nodes with a matching name for the given node.
*/
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
+ const char *name)
{
struct fwnode_handle *child;
unsigned int count = 0;
- device_for_each_child_node(dev, child)
+ fwnode_for_each_named_child_node(fwnode, child, name)
count++;
return count;
}
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_named_child_node_count);
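
A one-liner usage sketch, following the kernel-doc example above (count_channels() is hypothetical); it counts channel {}, channel@0 {}, channel@1 {} and so on:

    #include <linux/property.h>

    static unsigned int count_channels(struct device *dev)
    {
    	return fwnode_get_named_child_node_count(dev_fwnode(dev), "channel");
    }
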
bool device_dma_supported(const struct device *dev)
{
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index b1affac70d5d..ffb2ef488298 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,8 +6,6 @@
config REGMAP
bool
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
- select IRQ_DOMAIN if REGMAP_IRQ
- select MDIO_BUS if REGMAP_MDIO
help
Enable support for the Register Map (regmap) access API.
@@ -58,12 +56,14 @@ config REGMAP_W1
config REGMAP_MDIO
tristate
+ select MDIO_BUS
config REGMAP_MMIO
tristate
config REGMAP_IRQ
bool
+ select IRQ_DOMAIN
config REGMAP_RAM
tristate
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 83acccdc1008..1477329410ec 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
unsigned long raw_spinlock_flags;
};
};
+ struct lock_class_key *lock_key;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -72,12 +73,12 @@ struct regmap {
void *bus_context;
const char *name;
- bool async;
spinlock_t async_lock;
wait_queue_head_t async_waitq;
struct list_head async_list;
struct list_head async_free;
int async_ret;
+ bool async;
#ifdef CONFIG_DEBUG_FS
bool debugfs_disable;
@@ -116,8 +117,6 @@ struct regmap {
void *val_buf, size_t val_size);
int (*write)(void *context, const void *data, size_t count);
- bool defer_caching;
-
unsigned long read_flag_mask;
unsigned long write_flag_mask;
@@ -126,6 +125,8 @@ struct regmap {
int reg_stride;
int reg_stride_order;
+ bool defer_caching;
+
/* If set, will always write field to HW. */
bool force_write_field;
@@ -160,6 +161,9 @@ struct regmap {
struct reg_sequence *patch;
int patch_regs;
+ /* if set, the regmap core can sleep */
+ bool can_sleep;
+
/* if set, converts bulk read to single read */
bool use_single_read;
/* if set, converts bulk write to single write */
@@ -175,9 +179,6 @@ struct regmap {
void *selector_work_buf; /* Scratch buffer used for selector */
struct hwspinlock *hwlock;
-
- /* if set, the regmap core can sleep */
- bool can_sleep;
};
struct regcache_ops {
@@ -185,6 +186,7 @@ struct regcache_ops {
enum regcache_type type;
int (*init)(struct regmap *map);
int (*exit)(struct regmap *map);
+ int (*populate)(struct regmap *map);
#ifdef CONFIG_DEBUG_FS
void (*debugfs_init)(struct regmap *map);
#endif
@@ -287,6 +289,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_bus *bus,
const struct regmap_config *config);
+extern struct regcache_ops regcache_flat_sparse_ops;
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_maple_ops;
extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 9b17c77dec9d..53cc59c84e2f 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -6,7 +6,11 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/limits.h>
+#include <linux/overflow.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -18,46 +22,92 @@ static inline unsigned int regcache_flat_get_index(const struct regmap *map,
return regcache_get_index_by_order(map, reg);
}
+struct regcache_flat_data {
+ unsigned long *valid;
+ unsigned int data[];
+};
+
static int regcache_flat_init(struct regmap *map)
{
- int i;
- unsigned int *cache;
+ unsigned int cache_size;
+ struct regcache_flat_data *cache;
if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
- map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
- + 1, sizeof(unsigned int), GFP_KERNEL);
- if (!map->cache)
+ cache_size = regcache_flat_get_index(map, map->max_register) + 1;
+ cache = kzalloc(struct_size(cache, data, cache_size), map->alloc_flags);
+ if (!cache)
return -ENOMEM;
- cache = map->cache;
+ cache->valid = bitmap_zalloc(cache_size, map->alloc_flags);
+ if (!cache->valid)
+ goto err_free;
+
+ map->cache = cache;
+
+ return 0;
+
+err_free:
+ kfree(cache);
+ return -ENOMEM;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ struct regcache_flat_data *cache = map->cache;
+
+ if (cache)
+ bitmap_free(cache->valid);
+
+ kfree(cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_populate(struct regmap *map)
+{
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int i;
for (i = 0; i < map->num_reg_defaults; i++) {
unsigned int reg = map->reg_defaults[i].reg;
unsigned int index = regcache_flat_get_index(map, reg);
- cache[index] = map->reg_defaults[i].def;
+ cache->data[index] = map->reg_defaults[i].def;
+ __set_bit(index, cache->valid);
}
return 0;
}
-static int regcache_flat_exit(struct regmap *map)
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
{
- kfree(map->cache);
- map->cache = NULL;
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ /* legacy behavior: ignore validity, but warn the user */
+ if (unlikely(!test_bit(index, cache->valid)))
+ dev_warn_once(map->dev,
+ "using zero-initialized flat cache, this may cause unexpected behavior\n");
+
+ *value = cache->data[index];
return 0;
}
-static int regcache_flat_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
+static int regcache_flat_sparse_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
{
- unsigned int *cache = map->cache;
+ struct regcache_flat_data *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[index];
+ if (unlikely(!test_bit(index, cache->valid)))
+ return -ENOENT;
+
+ *value = cache->data[index];
return 0;
}
@@ -65,10 +115,23 @@ static int regcache_flat_read(struct regmap *map,
static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
- unsigned int *cache = map->cache;
+ struct regcache_flat_data *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
- cache[index] = value;
+ cache->data[index] = value;
+ __set_bit(index, cache->valid);
+
+ return 0;
+}
+
+static int regcache_flat_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int bitmap_min = regcache_flat_get_index(map, min);
+ unsigned int bitmap_max = regcache_flat_get_index(map, max);
+
+ bitmap_clear(cache->valid, bitmap_min, bitmap_max + 1 - bitmap_min);
return 0;
}
@@ -78,6 +141,18 @@ struct regcache_ops regcache_flat_ops = {
.name = "flat",
.init = regcache_flat_init,
.exit = regcache_flat_exit,
+ .populate = regcache_flat_populate,
.read = regcache_flat_read,
.write = regcache_flat_write,
};
+
+struct regcache_ops regcache_flat_sparse_ops = {
+ .type = REGCACHE_FLAT_S,
+ .name = "flat-sparse",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .populate = regcache_flat_populate,
+ .read = regcache_flat_sparse_read,
+ .write = regcache_flat_write,
+ .drop = regcache_flat_drop,
+};
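
A hedged sketch of opting in to the new sparse flat cache from a driver (foo_regmap_config is hypothetical; REGCACHE_FLAT_S is the cache type named in regcache_flat_sparse_ops above). With this type, reads of registers that were never written and have no default return -ENOENT instead of a silent zero:

    #include <linux/regmap.h>

    static const struct regmap_config foo_regmap_config = {
    	.reg_bits = 8,
    	.val_bits = 8,
    	.max_register = 0x7f,
    	.cache_type = REGCACHE_FLAT_S,
    };
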
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index e42433404854..ca1c72b68f31 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
- entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- map->alloc_flags);
+ entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -110,7 +109,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, min, max);
unsigned long *entry, *lower, *upper;
- unsigned long lower_index, lower_last;
+ /* initialized to work around false-positive -Wuninitialized warning */
+ unsigned long lower_index = 0, lower_last = 0;
unsigned long upper_index, upper_last;
int ret = 0;
@@ -132,9 +132,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
lower_index = mas.index;
lower_last = min -1;
- lower = kmemdup(entry, ((min - mas.index) *
- sizeof(unsigned long)),
- map->alloc_flags);
+ lower = kmemdup_array(entry,
+ min - mas.index, sizeof(*lower),
+ map->alloc_flags);
if (!lower) {
ret = -ENOMEM;
goto out_unlocked;
@@ -145,10 +145,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max - mas.index + 1],
- ((mas.last - max) *
- sizeof(unsigned long)),
- map->alloc_flags);
+ upper = kmemdup_array(&entry[max - mas.index + 1],
+ mas.last - max, sizeof(*upper),
+ map->alloc_flags);
if (!upper) {
ret = -ENOMEM;
goto out_unlocked;
@@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
- buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
+ buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
@@ -290,6 +289,23 @@ out:
return ret;
}
+static int regcache_maple_init(struct regmap *map)
+{
+ struct maple_tree *mt;
+
+ mt = kmalloc(sizeof(*mt), map->alloc_flags);
+ if (!mt)
+ return -ENOMEM;
+ map->cache = mt;
+
+ mt_init(mt);
+
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
+ return 0;
+}
+
static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
@@ -320,7 +336,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -341,23 +357,12 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
return ret;
}
-static int regcache_maple_init(struct regmap *map)
+static int regcache_maple_populate(struct regmap *map)
{
- struct maple_tree *mt;
int i;
int ret;
int range_start;
- mt = kmalloc(sizeof(*mt), GFP_KERNEL);
- if (!mt)
- return -ENOMEM;
- map->cache = mt;
-
- mt_init(mt);
-
- if (!map->num_reg_defaults)
- return 0;
-
range_start = 0;
/* Scan for ranges of contiguous registers */
@@ -367,23 +372,14 @@ static int regcache_maple_init(struct regmap *map)
ret = regcache_maple_insert_block(map, range_start,
i - 1);
if (ret != 0)
- goto err;
+ return ret;
range_start = i;
}
}
/* Add the last block */
- ret = regcache_maple_insert_block(map, range_start,
- map->num_reg_defaults - 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regcache_maple_exit(map);
- return ret;
+ return regcache_maple_insert_block(map, range_start, map->num_reg_defaults - 1);
}
struct regcache_ops regcache_maple_ops = {
@@ -391,6 +387,7 @@ struct regcache_ops regcache_maple_ops = {
.name = "maple",
.init = regcache_maple_init,
.exit = regcache_maple_exit,
+ .populate = regcache_maple_populate,
.read = regcache_maple_read,
.write = regcache_maple_write,
.drop = regcache_maple_drop,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 3db88bbcae0f..3344b82c3799 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -184,10 +184,8 @@ static void rbtree_debugfs_init(struct regmap *map)
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
- int i;
- int ret;
- map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+ map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
if (!map->cache)
return -ENOMEM;
@@ -195,19 +193,7 @@ static int regcache_rbtree_init(struct regmap *map)
rbtree_ctx->root = RB_ROOT;
rbtree_ctx->cached_rbnode = NULL;
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = regcache_rbtree_write(map,
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret)
- goto err;
- }
-
return 0;
-
-err:
- regcache_rbtree_exit(map);
- return ret;
}
static int regcache_rbtree_exit(struct regmap *map)
@@ -239,6 +225,22 @@ static int regcache_rbtree_exit(struct regmap *map)
return 0;
}
+static int regcache_rbtree_populate(struct regmap *map)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_rbtree_write(map,
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int regcache_rbtree_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
@@ -275,18 +277,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
- blk = krealloc(rbnode->block,
- blklen * map->cache_word_size,
- map->alloc_flags);
+ blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present),
- map->alloc_flags);
+ present = krealloc_array(rbnode->cache_present,
+ BITS_TO_LONGS(blklen), sizeof(*present),
+ map->alloc_flags);
if (!present)
return -ENOMEM;
@@ -548,6 +548,7 @@ struct regcache_ops regcache_rbtree_ops = {
.name = "rbtree",
.init = regcache_rbtree_init,
.exit = regcache_rbtree_exit,
+ .populate = regcache_rbtree_populate,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = rbtree_debugfs_init,
#endif
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 2e41cb12b8e2..319c342bf5a0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -16,11 +16,32 @@
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
+ &regcache_flat_sparse_ops,
&regcache_rbtree_ops,
&regcache_maple_ops,
&regcache_flat_ops,
};
+static int regcache_defaults_cmp(const void *a, const void *b)
+{
+ const struct reg_default *x = a;
+ const struct reg_default *y = b;
+
+ if (x->reg > y->reg)
+ return 1;
+ else if (x->reg < y->reg)
+ return -1;
+ else
+ return 0;
+}
+
+void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
+{
+ sort(defaults, ndefaults, sizeof(*defaults),
+ regcache_defaults_cmp, NULL);
+}
+EXPORT_SYMBOL_GPL(regcache_sort_defaults);
+
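
A usage sketch for the new helper (foo_defaults is hypothetical): defaults assembled out of order at runtime can be sorted in place before being handed to regmap, which expects them in ascending register order.

    #include <linux/array_size.h>
    #include <linux/regmap.h>

    static struct reg_default foo_defaults[] = {
    	{ .reg = 0x20, .def = 0x01 },
    	{ .reg = 0x10, .def = 0xff },
    };

    static void foo_fix_defaults(void)
    {
    	regcache_sort_defaults(foo_defaults, ARRAY_SIZE(foo_defaults));
    }
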
static int regcache_hw_init(struct regmap *map)
{
int i, j;
@@ -154,7 +175,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_word_size = BITS_TO_BYTES(config->val_bits);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
@@ -170,8 +191,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
* a copy of it.
*/
if (config->reg_defaults) {
- tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
- sizeof(struct reg_default), GFP_KERNEL);
+ tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
+ sizeof(*map->reg_defaults), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
map->reg_defaults = tmp_buf;
@@ -195,12 +216,30 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
+ map->lock(map->lock_arg);
ret = map->cache_ops->init(map);
+ map->unlock(map->lock_arg);
if (ret)
goto err_free;
}
+
+ if (map->num_reg_defaults && map->cache_ops->populate) {
+ dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
+ map->lock(map->lock_arg);
+ ret = map->cache_ops->populate(map);
+ map->unlock(map->lock_arg);
+ if (ret)
+ goto err_exit;
+ }
return 0;
+err_exit:
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
+ map->lock(map->lock_arg);
+ ret = map->cache_ops->exit(map);
+ map->unlock(map->lock_arg);
+ }
err_free:
kfree(map->reg_defaults);
if (map->cache_free)
@@ -223,7 +262,9 @@ void regcache_exit(struct regmap *map)
if (map->cache_ops->exit) {
dev_dbg(map->dev, "Destroying %s cache\n",
map->cache_ops->name);
+ map->lock(map->lock_arg);
map->cache_ops->exit(map);
+ map->unlock(map->lock_arg);
}
}
@@ -407,7 +448,7 @@ out:
* have gone out of sync, force writes of all the paging
* registers.
*/
- rb_for_each(node, 0, &map->range_tree, rbtree_all) {
+ rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index b9f76bdf74a9..a561971c459c 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -86,4 +86,5 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
+MODULE_DESCRIPTION("Register map access API - AC'97 support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index fb84cda92a75..c9b4c04b1cf6 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_only) {
@@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
map->cache_only = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_bypass) {
@@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
map->cache_bypass = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
return count;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 3ec611dc0c09..c9b39a02278e 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
if (quirks->max_write_len &&
(bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
- max_write = quirks->max_write_len;
+ max_write = quirks->max_write_len -
+ (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_read || max_write) {
ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
@@ -396,4 +397,5 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
+MODULE_DESCRIPTION("Register map access API - I2C support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index b5300b7c477e..863b348704dc 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+#include <linux/array_size.h>
#include <linux/regmap.h>
#include <linux/i3c/device.h>
#include <linux/i3c/master.h>
@@ -10,7 +11,7 @@ static int regmap_i3c_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
- struct i3c_priv_xfer xfers[] = {
+ struct i3c_xfer xfers[] = {
{
.rnw = false,
.len = count,
@@ -18,7 +19,7 @@ static int regmap_i3c_write(void *context, const void *data, size_t count)
},
};
- return i3c_device_do_priv_xfers(i3c, xfers, 1);
+ return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}
static int regmap_i3c_read(void *context,
@@ -27,7 +28,7 @@ static int regmap_i3c_read(void *context,
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
- struct i3c_priv_xfer xfers[2];
+ struct i3c_xfer xfers[2];
xfers[0].rnw = false;
xfers[0].len = reg_size;
@@ -37,7 +38,7 @@ static int regmap_i3c_read(void *context,
xfers[1].len = val_size;
xfers[1].data.in = val;
- return i3c_device_do_priv_xfers(i3c, xfers, 2);
+ return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}
static const struct regmap_bus regmap_i3c = {
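
For drivers doing their own transfers, the same conversion applies: the
private-transfer API on the removed lines gives way to the generic one added
here. A sketch of a raw read helper, assuming only the
i3c_xfer/i3c_device_do_xfers() interface shown in this hunk:

    #include <linux/i3c/device.h>

    /* Read @len bytes from @i3c in SDR mode. */
    static int example_i3c_read(struct i3c_device *i3c, void *buf, size_t len)
    {
        struct i3c_xfer xfer = {
            .rnw = true,
            .len = len,
            .data.in = buf,
        };

        return i3c_device_do_xfers(i3c, &xfer, 1, I3C_SDR);
    }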
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 45fd13ef13fc..6112d942499b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -6,11 +6,13 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/overflow.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -19,6 +21,7 @@
struct regmap_irq_chip_data {
struct mutex lock;
+ struct lock_class_key lock_key;
struct irq_chip irq_chip;
struct regmap *map;
@@ -33,6 +36,7 @@ struct regmap_irq_chip_data {
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
+ unsigned int *prev_status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
@@ -193,10 +197,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
/* If we've changed our wakeup count propagate it to the parent */
if (d->wake_count < 0)
for (i = d->wake_count; i < 0; i++)
- irq_set_irq_wake(d->irq, 0);
+ disable_irq_wake(d->irq);
else if (d->wake_count > 0)
for (i = 0; i < d->wake_count; i++)
- irq_set_irq_wake(d->irq, 1);
+ enable_irq_wake(d->irq);
d->wake_count = 0;
@@ -305,8 +309,8 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
unsigned int b)
{
const struct regmap_irq_chip *chip = data->chip;
+ const struct regmap_irq_sub_irq_map *subreg;
struct regmap *map = data->map;
- struct regmap_irq_sub_irq_map *subreg;
unsigned int reg;
int i, ret = 0;
@@ -332,27 +336,13 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
return ret;
}
-static irqreturn_t regmap_irq_thread(int irq, void *d)
+static int read_irq_data(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *data = d;
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
- bool handled = false;
u32 reg;
- if (chip->handle_pre_irq)
- chip->handle_pre_irq(chip->irq_drv_data);
-
- if (chip->runtime_pm) {
- ret = pm_runtime_get_sync(map->dev);
- if (ret < 0) {
- dev_err(map->dev, "IRQ thread failed to resume: %d\n",
- ret);
- goto exit;
- }
- }
-
/*
* Read only registers with active IRQs if the chip has 'main status
* register'. Else read in the statuses, using a single bulk read if
@@ -364,14 +354,11 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
} else if (chip->num_main_regs) {
unsigned int max_main_bits;
- unsigned long size;
-
- size = chip->num_regs * sizeof(unsigned int);
max_main_bits = (chip->num_main_status_bits) ?
chip->num_main_status_bits : chip->num_regs;
/* Clear the status buf as we don't read all status regs */
- memset(data->status_buf, 0, size);
+ memset32(data->status_buf, 0, chip->num_regs);
/* We could support bulk read for main status registers
* but I don't expect to see devices with really many main
@@ -382,10 +369,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
reg = data->get_irq_reg(data, chip->main_status, i);
ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -401,10 +386,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = read_sub_irq_data(data, b);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -421,9 +404,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_reg_buf,
chip->num_regs);
if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
for (i = 0; i < data->chip->num_regs; i++) {
@@ -439,7 +421,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
break;
default:
BUG();
- goto exit;
+ return -EIO;
}
}
@@ -450,10 +432,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
}
}
@@ -462,6 +442,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++)
data->status_buf[i] = ~data->status_buf[i];
+ return 0;
+}
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ bool handled = false;
+ u32 reg;
+
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
+ if (chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0) {
+ dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = read_irq_data(data);
+ if (ret < 0)
+ goto exit;
+
+ if (chip->status_is_level) {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ unsigned int val = data->status_buf[i];
+
+ data->status_buf[i] ^= data->prev_status_buf[i];
+ data->prev_status_buf[i] = val;
+ }
+ }
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -514,12 +530,16 @@ exit:
return IRQ_NONE;
}
+static struct lock_class_key regmap_irq_lock_class;
+static struct lock_class_key regmap_irq_request_class;
+
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct regmap_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data);
+ irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq);
@@ -608,6 +628,30 @@ int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
+static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data *d)
+{
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = chip->num_irqs,
+ .hwirq_max = chip->num_irqs,
+ .virq_base = irq_base,
+ .ops = &regmap_domain_ops,
+ .host_data = d,
+ .name_suffix = chip->domain_suffix,
+ };
+
+ d->domain = irq_domain_instantiate(&info);
+ if (IS_ERR(d->domain)) {
+ dev_err(d->map->dev, "Failed to create IRQ domain\n");
+ return PTR_ERR(d->domain);
+ }
+
+ return 0;
+}
+
+
/**
* regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
@@ -680,6 +724,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!d->status_buf)
goto err_alloc;
+ if (chip->status_is_level) {
+ d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf),
+ GFP_KERNEL);
+ if (!d->prev_status_buf)
+ goto err_alloc;
+ }
+
d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
@@ -751,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
- mutex_init(&d->lock);
+ /*
+ * If one regmap-irq is the parent of another then we'll try
+ * to lock the child with the parent locked; use an explicit
+ * lock_key so lockdep can tell the two mutexes apart.
+ */
+ lockdep_register_key(&d->lock_key);
+ mutex_init_with_key(&d->lock, &d->lock_key);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
@@ -766,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
- goto err_alloc;
+ goto err_mutex;
}
if (chip->mask_base && !chip->handle_mask_sync) {
@@ -777,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -788,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -798,14 +855,14 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
/* Ack masked but set interrupts */
if (d->chip->no_status) {
/* no status register so default to all active */
- d->status_buf[i] = GENMASK(31, 0);
+ d->status_buf[i] = UINT_MAX;
} else {
reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -829,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -851,24 +908,25 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
- if (irq_base)
- d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
- irq_base, 0,
- &regmap_domain_ops, d);
- else
- d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
- &regmap_domain_ops, d);
- if (!d->domain) {
- dev_err(map->dev, "Failed to create IRQ domain\n");
- ret = -ENOMEM;
- goto err_alloc;
+ /* Store current levels */
+ if (chip->status_is_level) {
+ ret = read_irq_data(d);
+ if (ret < 0)
+ goto err_mutex;
+
+ memcpy(d->prev_status_buf, d->status_buf,
+ array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
}
+ ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
+ if (ret)
+ goto err_mutex;
+
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
chip->name, d);
@@ -884,13 +942,18 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
err_domain:
/* Should really dispose of the domain but... */
+err_mutex:
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++)
@@ -965,13 +1028,17 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
if (d->config_buf) {
for (i = 0; i < d->chip->num_config_bases; i++)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
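
The prev_status_buf added above supports chips whose status registers report
the current level of each interrupt line rather than latched events; the core
diffs successive reads to synthesize events. A sketch of how a driver would
opt in; all names, addresses and bit positions are invented:

    #include <linux/array_size.h>
    #include <linux/bits.h>
    #include <linux/regmap.h>

    static const struct regmap_irq example_irqs[] = {
        REGMAP_IRQ_REG(0, 0, BIT(0)),
        REGMAP_IRQ_REG(1, 0, BIT(1)),
    };

    static const struct regmap_irq_chip example_irq_chip = {
        .name = "example",
        .status_base = 0x10,
        .mask_base = 0x14,
        .num_regs = 1,
        .irqs = example_irqs,
        .num_irqs = ARRAY_SIZE(example_irqs),
        /* status registers hold levels, not latched events */
        .status_is_level = true,
    };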
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 9c5314785fc2..f6fc5ed016da 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -22,6 +22,7 @@ struct regmap_test_param {
enum regmap_endian val_endian;
unsigned int from_reg;
+ bool fast_io;
};
static void get_changed_bytes(void *orig, void *new, size_t size)
@@ -53,6 +54,8 @@ static const char *regcache_type_name(enum regcache_type type)
return "none";
case REGCACHE_FLAT:
return "flat";
+ case REGCACHE_FLAT_S:
+ return "flat-sparse";
case REGCACHE_RBTREE:
return "rbtree";
case REGCACHE_MAPLE:
@@ -80,41 +83,62 @@ static const char *regmap_endian_name(enum regmap_endian endian)
static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
- snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
regcache_type_name(param->cache),
regmap_endian_name(param->val_endian),
+ param->fast_io ? " fast I/O" : "",
param->from_reg);
}
static const struct regmap_test_param regcache_types_list[] = {
{ .cache = REGCACHE_NONE },
+ { .cache = REGCACHE_NONE, .fast_io = true },
{ .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S },
+ { .cache = REGCACHE_FLAT_S, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
+ { .cache = REGCACHE_MAPLE, .fast_io = true },
};
KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
static const struct regmap_test_param real_cache_types_only_list[] = {
{ .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S },
+ { .cache = REGCACHE_FLAT_S, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
+ { .cache = REGCACHE_MAPLE, .fast_io = true },
};
KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
static const struct regmap_test_param real_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -124,12 +148,20 @@ static const struct regmap_test_param real_cache_types_list[] = {
KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -145,14 +177,13 @@ static struct regmap *gen_regmap(struct kunit *test,
const struct regmap_test_param *param = test->param_value;
struct regmap_test_priv *priv = test->priv;
unsigned int *buf;
- struct regmap *ret;
+ struct regmap *ret = ERR_PTR(-ENOMEM);
size_t size;
- int i;
+ int i, error;
struct reg_default *defaults;
config->cache_type = param->cache;
- config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
- config->cache_type == REGCACHE_MAPLE;
+ config->fast_io = param->fast_io;
if (config->max_register == 0) {
config->max_register = param->from_reg;
@@ -163,7 +194,7 @@ static struct regmap *gen_regmap(struct kunit *test,
config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
}
- size = (config->max_register + 1) * sizeof(unsigned int);
+ size = array_size(config->max_register + 1, sizeof(*buf));
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -172,15 +203,17 @@ static struct regmap *gen_regmap(struct kunit *test,
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
- return ERR_PTR(-ENOMEM);
+ goto out_free;
(*data)->vals = buf;
if (config->num_reg_defaults) {
- defaults = kcalloc(config->num_reg_defaults,
- sizeof(struct reg_default),
- GFP_KERNEL);
+ defaults = kunit_kcalloc(test,
+ config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
if (!defaults)
- return ERR_PTR(-ENOMEM);
+ goto out_free;
+
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
@@ -190,12 +223,19 @@ static struct regmap *gen_regmap(struct kunit *test,
}
ret = regmap_init_ram(priv->dev, config, *data);
- if (IS_ERR(ret)) {
- kfree(buf);
- kfree(*data);
- } else {
- kunit_add_action(test, regmap_exit_action, ret);
- }
+ if (IS_ERR(ret))
+ goto out_free;
+
+ /* This calls regmap_exit() on failure, which frees buf and *data */
+ error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
+ if (error)
+ ret = ERR_PTR(error);
+
+ return ret;
+
+out_free:
+ kfree(buf);
+ kfree(*data);
return ret;
}
@@ -295,6 +335,77 @@ static void bulk_read(struct kunit *test)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
+static void multi_write(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ struct reg_sequence sequence[BLOCK_TEST_SIZE];
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /*
+ * Data written via the multi API can be read back with single
+ * reads.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ sequence[i].reg = i;
+ sequence[i].def = val[i];
+ sequence[i].delay_us = 0;
+ }
+ KUNIT_EXPECT_EQ(test, 0,
+ regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
+
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
+static void multi_read(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int regs[BLOCK_TEST_SIZE];
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Data written as single writes can be read via the multi API */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ regs[i] = i;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
+ }
+ KUNIT_EXPECT_EQ(test, 0,
+ regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
static void read_bypassed(struct kunit *test)
{
const struct regmap_test_param *param = test->param_value;
@@ -609,12 +720,19 @@ static void stride(struct kunit *test)
config.reg_stride = 2;
config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
+ /*
+ * Allow one extra register so that the read/written arrays
+ * are sized big enough to include an entry for the odd
+ * address past the final reg_default register.
+ */
+ config.max_register = BLOCK_TEST_SIZE;
+
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
- /* Only even registers can be accessed, try both read and write */
+ /* Only even addresses can be accessed, try both read and write */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
data->read[i] = false;
data->written[i] = false;
@@ -636,7 +754,7 @@ static void stride(struct kunit *test)
}
}
-static struct regmap_range_cfg test_range = {
+static const struct regmap_range_cfg test_range = {
.selector_reg = 1,
.selector_mask = 0xff,
@@ -752,10 +870,9 @@ static void stress_insert(struct kunit *test)
if (IS_ERR(map))
return;
- vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
- GFP_KERNEL);
+ buf_sz = array_size(sizeof(*vals), config.max_register);
+ vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
KUNIT_ASSERT_FALSE(test, vals == NULL);
- buf_sz = sizeof(unsigned long) * config.max_register;
get_random_bytes(vals, buf_sz);
@@ -1400,6 +1517,48 @@ static void cache_present(struct kunit *test)
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
+static void cache_write_zero(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+
+ /* No defaults so no registers cached. */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
+
+ /* We didn't trigger any reads */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
+
+ /* Write a zero value */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
+
+ /* Read that zero value back */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_EXPECT_EQ(test, 0, val);
+
+ /* From the cache? */
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
+
+ /* Try to throw it away */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
+}
+
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
@@ -1456,6 +1615,8 @@ static const struct regmap_test_param raw_types_list[] = {
{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
@@ -1467,6 +1628,8 @@ KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
static const struct regmap_test_param raw_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
@@ -1490,16 +1653,17 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
struct regmap_test_priv *priv = test->priv;
const struct regmap_test_param *param = test->param_value;
u16 *buf;
- struct regmap *ret;
- size_t size = (config->max_register + 1) * config->reg_bits / 8;
- int i;
+ struct regmap *ret = ERR_PTR(-ENOMEM);
+ int i, error;
struct reg_default *defaults;
+ size_t size;
config->cache_type = param->cache;
config->val_format_endian = param->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
+ size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -1508,15 +1672,16 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
- return ERR_PTR(-ENOMEM);
+ goto out_free;
(*data)->vals = (void *)buf;
config->num_reg_defaults = config->max_register + 1;
- defaults = kcalloc(config->num_reg_defaults,
- sizeof(struct reg_default),
- GFP_KERNEL);
+ defaults = kunit_kcalloc(test,
+ config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
if (!defaults)
- return ERR_PTR(-ENOMEM);
+ goto out_free;
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
@@ -1529,7 +1694,8 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
defaults[i].def = be16_to_cpu(buf[i]);
break;
default:
- return ERR_PTR(-EINVAL);
+ ret = ERR_PTR(-EINVAL);
+ goto out_free;
}
}
@@ -1541,12 +1707,19 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
config->num_reg_defaults = 0;
ret = regmap_init_raw_ram(priv->dev, config, *data);
- if (IS_ERR(ret)) {
- kfree(buf);
- kfree(*data);
- } else {
- kunit_add_action(test, regmap_exit_action, ret);
- }
+ if (IS_ERR(ret))
+ goto out_free;
+
+ /* This calls regmap_exit() on failure, which frees buf and *data */
+ error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
+ if (error)
+ ret = ERR_PTR(error);
+
+ return ret;
+
+out_free:
+ kfree(buf);
+ kfree(*data);
return ret;
}
@@ -1590,7 +1763,7 @@ static void raw_read_defaults(struct kunit *test)
if (IS_ERR(map))
return;
- val_len = sizeof(*rval) * (config.max_register + 1);
+ val_len = array_size(sizeof(*rval), config.max_register + 1);
rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
@@ -1880,6 +2053,8 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
@@ -1901,6 +2076,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
@@ -1951,4 +2127,5 @@ static struct kunit_suite regmap_test_suite = {
};
kunit_test_suite(regmap_test_suite);
+MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");
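
The REGCACHE_FLAT_S parameters above exercise the new sparse flat cache
registered at the top of this series. Selecting it from a driver is a
one-line change; a sketch, with every field other than the cache type
invented:

    #include <linux/regmap.h>

    static const struct regmap_config example_config = {
        .reg_bits = 8,
        .val_bits = 16,
        .max_register = 0xff,
        .cache_type = REGCACHE_FLAT_S,
    };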
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 99d7fd85ca7d..29e5f3175301 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -609,4 +609,5 @@ void regmap_mmio_detach_clk(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+MODULE_DESCRIPTION("regmap MMIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 5b4cbf982a11..4e5b4518ce4d 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -83,4 +83,5 @@ struct regmap *__regmap_init_ram(struct device *dev,
}
EXPORT_SYMBOL_GPL(__regmap_init_ram);
+MODULE_DESCRIPTION("Register map access API - Memory region");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index 69eabfb89eda..76c98814fb8a 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -142,4 +142,5 @@ struct regmap *__regmap_init_raw_ram(struct device *dev,
}
EXPORT_SYMBOL_GPL(__regmap_init_raw_ram);
+MODULE_DESCRIPTION("Register map access API - Memory region with raw access");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
index 986af26d88c2..12bbbb03e5f2 100644
--- a/drivers/base/regmap/regmap-sccb.c
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -125,4 +125,5 @@ struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+MODULE_DESCRIPTION("Register map access API - SCCB support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index c99eada83780..6a61629f5f89 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,47 +1,189 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
+#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
#include "internal.h"
+struct regmap_mbq_context {
+ struct device *dev;
+ struct sdw_slave *sdw;
+
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+
+ struct regmap_sdw_mbq_cfg cfg;
+
+ int val_size;
+};
+
+static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ int size = ctx->val_size;
+
+ if (ctx->cfg.mbq_size) {
+ size = ctx->cfg.mbq_size(ctx->dev, reg);
+ if (!size || size > ctx->val_size)
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ if (ctx->cfg.deferrable)
+ return ctx->cfg.deferrable(ctx->dev, reg);
+
+ return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+ struct regmap_mbq_context *ctx)
+{
+ struct device *dev = ctx->dev;
+ int val, ret = 0;
+
+ dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+ reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+ if (ctx->readable_reg(dev, reg)) {
+ ret = read_poll_timeout(sdw_read_no_pm, val,
+ val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+ ctx->cfg.timeout_us, ctx->cfg.retry_us,
+ false, slave, reg);
+ if (val < 0)
+ return val;
+ if (ret)
+ dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+ } else {
+ fsleep(ctx->cfg.timeout_us);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int val,
+ int mbq_size, bool deferrable)
+{
+ int shift = mbq_size * BITS_PER_BYTE;
+ int ret;
+
+ while (--mbq_size > 0) {
+ shift -= BITS_PER_BYTE;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
+ (val >> shift) & 0xff);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sdw_write_no_pm(slave, reg, val & 0xff);
+ if (deferrable && ret == -ENODATA)
+ return -EAGAIN;
+
+ return ret;
+}
+
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
- struct device *dev = context;
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct regmap_mbq_context *ctx = context;
+ struct sdw_slave *slave = ctx->sdw;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
- ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
- if (ret < 0)
- return ret;
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+ }
- return sdw_write_no_pm(slave, reg, val & 0xff);
+ return ret;
}
-static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int *val,
+ int mbq_size, bool deferrable)
{
- struct device *dev = context;
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read0;
- int read1;
+ int shift = BITS_PER_BYTE;
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0) {
+ if (deferrable && read == -ENODATA)
+ return -EAGAIN;
+
+ return read;
+ }
- read0 = sdw_read_no_pm(slave, reg);
- if (read0 < 0)
- return read0;
+ *val = read;
- read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
- if (read1 < 0)
- return read1;
+ while (--mbq_size > 0) {
+ read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read < 0)
+ return read;
- *val = (read1 << 8) | read0;
+ *val |= read << shift;
+ shift += BITS_PER_BYTE;
+ }
return 0;
}
+static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct regmap_mbq_context *ctx = context;
+ struct sdw_slave *slave = ctx->sdw;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+ int ret;
+
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+ }
+
+ return ret;
+}
+
static const struct regmap_bus regmap_sdw_mbq = {
.reg_read = regmap_sdw_mbq_read,
.reg_write = regmap_sdw_mbq_write,
@@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = {
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
- /* MBQ-based controls are only 16-bits for now */
- if (config->val_bits != 16)
+ if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;
/* Registers are 32 bits wide */
@@ -65,35 +206,71 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}
-struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+static struct regmap_mbq_context *
+regmap_sdw_mbq_gen_context(struct device *dev,
+ struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config)
+{
+ struct regmap_mbq_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->sdw = sdw;
+
+ if (mbq_config)
+ ctx->cfg = *mbq_config;
+
+ ctx->val_size = config->val_bits / BITS_PER_BYTE;
+ ctx->readable_reg = config->readable_reg;
+
+ return ctx;
+}
+
+struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
-struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
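
Callers now pass a struct device plus an optional per-register configuration.
A sketch of the new arguments, assuming the devm_regmap_init_sdw_mbq()
wrapper macro forwards to __devm_regmap_init_sdw_mbq() like the other regmap
init macros; the callback logic and numbers are invented:

    #include <linux/regmap.h>
    #include <linux/soundwire/sdw.h>

    /* Report a per-register MBQ width in bytes. */
    static int example_mbq_size(struct device *dev, unsigned int reg)
    {
        return 2;  /* every control is 16 bits on this imaginary part */
    }

    static const struct regmap_sdw_mbq_cfg example_mbq_cfg = {
        .mbq_size = example_mbq_size,
        .timeout_us = 10000,  /* total wait on function busy */
        .retry_us = 100,      /* poll interval while waiting */
    };

    /* In probe():
     *   map = devm_regmap_init_sdw_mbq(dev, sdw, &config, &example_mbq_cfg);
     */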
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 8075db788b39..e523fae73004 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
- lock_key, lock_name);
+ return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,9 +62,9 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
- lock_key, lock_name);
+ return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
+MODULE_DESCRIPTION("Register map access API - SLIMbus support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index 4c2b94b3e30b..d86a06cadcdb 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -710,4 +710,5 @@ struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
+MODULE_DESCRIPTION("Register map access API - SPI AVMM support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 094cf2a2ca3c..14b1d88997cb 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
return ERR_PTR(-ENOMEM);
max_msg_size = spi_max_message_size(spi);
- reg_reserve_size = config->reg_bits / BITS_PER_BYTE
- + config->pad_bits / BITS_PER_BYTE;
+ reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_size + reg_reserve_size > max_msg_size)
max_size -= reg_reserve_size;
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index cdf12d2aa3a1..347bfe9544ce 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -222,4 +222,5 @@ struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
+MODULE_DESCRIPTION("Register map access API - SPMI support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 3a8b402db852..29fd24f9c7ed 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -234,4 +234,5 @@ struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
+MODULE_DESCRIPTION("Register map access API - W1 (1-Wire) support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0a34dd3c4f38..ce9be3989a21 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -17,7 +17,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
+static int dev_get_regmap_match(struct device *dev, void *res, void *data);
+
+static int regmap_detach_dev(struct device *dev, struct regmap *map)
+{
+ if (!dev)
+ return 0;
+
+ return devres_release(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)map->name);
+}
+
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{
@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
lock_key, lock_name);
}
map->lock_arg = map;
+ map->lock_key = lock_key;
}
/*
@@ -757,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev,
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
+ map->reg_shift = config->pad_bits % 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
- config->val_bits + config->pad_bits, 8);
- map->reg_shift = config->pad_bits % 8;
+ map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
+ map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
+ map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
@@ -816,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (config && config->read && config->write) {
+ if (config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
@@ -1051,13 +1062,13 @@ skip_format_initialization:
/* Sanity check */
if (range_cfg->range_max < range_cfg->range_min) {
- dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
range_cfg->range_max, range_cfg->range_min);
goto err_range;
}
if (range_cfg->range_max > map->max_register) {
- dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
range_cfg->range_max, map->max_register);
goto err_range;
}
@@ -1162,6 +1173,8 @@ err_name:
err_map:
kfree(map);
err:
+ if (bus && bus->free_on_exit)
+ kfree(bus);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
@@ -1444,7 +1457,9 @@ void regmap_exit(struct regmap *map)
{
struct regmap_async *async;
+ regmap_detach_dev(map->dev, map);
regcache_exit(map);
+
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
@@ -2243,12 +2258,14 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* @field: Register field to operate on
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns negative errno if the underlying regmap_field_read() fails,
+ * 0 if at least one of the tested bits is not set and 1 if all tested
+ * bits are set.
*/
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_field_read(field, &val);
if (ret)
@@ -2347,7 +2364,7 @@ out:
} else {
void *wval;
- wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
+ wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
if (!wval)
return -ENOMEM;
@@ -3101,8 +3118,53 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
+static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
+ const unsigned int *regs, void *val, size_t val_count)
+{
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+ int ret, i;
+
+ map->lock(map->lock_arg);
+
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
+
+ if (regs) {
+ if (!IS_ALIGNED(regs[i], map->reg_stride)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = _regmap_read(map, regs[i], &ival);
+ } else {
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
+ }
+ if (ret != 0)
+ goto out;
+
+ switch (map->format.val_bytes) {
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+
/**
- * regmap_bulk_read() - Read multiple registers from the device
+ * regmap_bulk_read() - Read multiple sequential registers from the device
*
* @map: Register map to read from
* @reg: First register to be read from
@@ -3132,47 +3194,35 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(val + i);
} else {
- u32 *u32 = val;
- u16 *u16 = val;
- u8 *u8 = val;
-
- map->lock(map->lock_arg);
-
- for (i = 0; i < val_count; i++) {
- unsigned int ival;
-
- ret = _regmap_read(map, reg + regmap_get_offset(map, i),
- &ival);
- if (ret != 0)
- goto out;
-
- switch (map->format.val_bytes) {
- case 4:
- u32[i] = ival;
- break;
- case 2:
- u16[i] = ival;
- break;
- case 1:
- u8[i] = ival;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- }
-
-out:
- map->unlock(map->lock_arg);
+ ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
}
-
if (!ret)
trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
-
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
+/**
+ * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
+ *
+ * @map: Register map to read from
+ * @regs: Array of registers to read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
+ size_t val_count)
+{
+ if (val_count == 0)
+ return -EINVAL;
+
+ return _regmap_bulk_read(map, 0, regs, val, val_count);
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
+
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool force_write)
@@ -3261,7 +3311,8 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_read(map, reg, &val);
if (ret)
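
The scattered-read entry point added above complements regmap_bulk_read()
for registers that are not contiguous. A sketch; the addresses are invented,
and the value buffer assumes a map with 32-bit values (val_bytes == 4):

    #include <linux/array_size.h>
    #include <linux/regmap.h>

    static int example_read_scattered(struct regmap *map)
    {
        static const unsigned int regs[] = { 0x04, 0x10, 0x80 };
        unsigned int vals[ARRAY_SIZE(regs)];

        /* each regs[i] must be aligned to the map's reg_stride */
        return regmap_multi_reg_read(map, regs, vals, ARRAY_SIZE(regs));
    }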
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 704e106e5dbd..bcc5a8b226a6 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(regmap_reg,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->val = val;
),
@@ -74,7 +74,7 @@ DECLARE_EVENT_CLASS(regmap_bulk,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->val_len = val_len;
memcpy(__get_dynamic_array(buf), val, val_len);
@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(regmap_block,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->count = count;
),
@@ -163,9 +163,9 @@ TRACE_EVENT(regcache_sync,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
- __assign_str(status, status);
- __assign_str(type, type);
+ __assign_str(name);
+ __assign_str(status);
+ __assign_str(type);
),
TP_printk("%s type=%s status=%s", __get_str(name),
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(regmap_bool,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->flag = flag;
),
@@ -216,7 +216,7 @@ DECLARE_EVENT_CLASS(regmap_async,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
),
TP_printk("%s", __get_str(name))
@@ -264,7 +264,7 @@ TRACE_EVENT(regcache_drop_region,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->from = from;
__entry->to = to;
),
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index eb6eb25b343b..16a8301c25d6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,20 +529,35 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;
- if (index * sizeof(*ref) >= prop->length)
+ if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;
ref_array = prop->pointer;
ref = &ref_array[index];
- refnode = software_node_fwnode(ref->node);
+ /*
+ * A software node can reference other software nodes or firmware
+ * nodes (which are the abstraction layer sitting on top of them).
+ * This is done to ensure we can create references to static software
+ * nodes before they're registered with the firmware node framework.
+ * At the time the reference is being resolved, we expect the swnodes
+ * in question to already have been registered and to be backed by
+ * a firmware node. This is why we use the fwnode API below to read the
+ * relevant properties and bump the reference count.
+ */
+
+ if (ref->swnode)
+ refnode = software_node_fwnode(ref->swnode);
+ else if (ref->fwnode)
+ refnode = ref->fwnode;
+ else
+ return -EINVAL;
+
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- error = property_entry_read_int_array(ref->node->properties,
- nargs_prop, sizeof(u32),
- &nargs_prop_val, 1);
+ error = fwnode_property_read_u32(refnode, nargs_prop, &nargs_prop_val);
if (error)
return error;
@@ -555,7 +570,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (!args)
return 0;
- args->fwnode = software_node_get(refnode);
+ args->fwnode = fwnode_handle_get(refnode);
args->nargs = nargs;
for (i = 0; i < nargs; i++)
@@ -635,7 +650,10 @@ software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
ref = prop->pointer;
- return software_node_get(software_node_fwnode(ref[0].node));
+ if (!ref->swnode)
+ return NULL;
+
+ return software_node_get(software_node_fwnode(ref->swnode));
}
static struct fwnode_handle *
@@ -677,6 +695,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
@@ -843,7 +862,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
* of this function or by ordering the array such that parent comes before
* child.
*/
-int software_node_register_node_group(const struct software_node **node_group)
+int software_node_register_node_group(const struct software_node * const *node_group)
{
unsigned int i;
int ret;
@@ -876,8 +895,7 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* remove the nodes individually, in the correct order (child before
* parent).
*/
-void software_node_unregister_node_group(
- const struct software_node **node_group)
+void software_node_unregister_node_group(const struct software_node * const *node_group)
{
unsigned int i = 0;
@@ -1079,6 +1097,7 @@ void software_node_notify(struct device *dev)
if (!swnode)
return;
+ kobject_get(&swnode->kobj);
ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
if (ret)
return;
@@ -1088,8 +1107,6 @@ void software_node_notify(struct device *dev)
sysfs_remove_link(&dev->kobj, "software_node");
return;
}
-
- kobject_get(&swnode->kobj);
}
void software_node_notify_remove(struct device *dev)
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 13db1f78d2ce..483adb796654 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -11,32 +11,32 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
-static LIST_HEAD(syscore_ops_list);
-static DEFINE_MUTEX(syscore_ops_lock);
+static LIST_HEAD(syscore_list);
+static DEFINE_MUTEX(syscore_lock);
/**
- * register_syscore_ops - Register a set of system core operations.
- * @ops: System core operations to register.
+ * register_syscore - Register a set of system core operations.
+ * @syscore: System core operations to register.
*/
-void register_syscore_ops(struct syscore_ops *ops)
+void register_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_add_tail(&ops->node, &syscore_ops_list);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_add_tail(&syscore->node, &syscore_list);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(register_syscore_ops);
+EXPORT_SYMBOL_GPL(register_syscore);
/**
- * unregister_syscore_ops - Unregister a set of system core operations.
- * @ops: System core operations to unregister.
+ * unregister_syscore - Unregister a set of system core operations.
+ * @syscore: System core operations to unregister.
*/
-void unregister_syscore_ops(struct syscore_ops *ops)
+void unregister_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_del(&ops->node);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_del(&syscore->node);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+EXPORT_SYMBOL_GPL(unregister_syscore);
#ifdef CONFIG_PM_SLEEP
/**
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
*/
int syscore_suspend(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
int ret = 0;
trace_suspend_resume(TPS("syscore_suspend"), 0, true);
@@ -59,25 +59,27 @@ int syscore_suspend(void)
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->suspend) {
- pm_pr_dbg("Calling %pS\n", ops->suspend);
- ret = ops->suspend();
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->suspend) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->suspend);
+ ret = syscore->ops->suspend(syscore->data);
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->suspend);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
- pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+ pr_err("PM: System core suspend callback %pS failed.\n",
+ syscore->ops->suspend);
- list_for_each_entry_continue(ops, &syscore_ops_list, node)
- if (ops->resume)
- ops->resume();
+ list_for_each_entry_continue(syscore, &syscore_list, node)
+ if (syscore->ops->resume)
+ syscore->ops->resume(syscore->data);
return ret;
}
@@ -90,18 +92,19 @@ EXPORT_SYMBOL_GPL(syscore_suspend);
*/
void syscore_resume(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
- list_for_each_entry(ops, &syscore_ops_list, node)
- if (ops->resume) {
- pm_pr_dbg("Calling %pS\n", ops->resume);
- ops->resume();
+ list_for_each_entry(syscore, &syscore_list, node)
+ if (syscore->ops->resume) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->resume);
+ syscore->ops->resume(syscore->data);
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->resume);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
@@ -113,16 +116,17 @@ EXPORT_SYMBOL_GPL(syscore_resume);
*/
void syscore_shutdown(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
- mutex_lock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->shutdown) {
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->shutdown) {
if (initcall_debug)
- pr_info("PM: Calling %pS\n", ops->shutdown);
- ops->shutdown();
+ pr_info("PM: Calling %pS\n",
+ syscore->ops->shutdown);
+ syscore->ops->shutdown(syscore->data);
}
- mutex_unlock(&syscore_ops_lock);
+ mutex_unlock(&syscore_lock);
}
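A sketch of a registration under the reworked syscore API above. The call sites imply that struct syscore bundles a list node, an ops table whose callbacks take a data cookie, and the cookie itself; the exact layout lives in the matching linux/syscore_ops.h change, which is outside this hunk, so treat the field names as assumptions. All mydev names are hypothetical.

struct mydev_state { int saved_ctl; };
static struct mydev_state mydev_state;

static int mydev_syscore_suspend(void *data)
{
	struct mydev_state *st = data;

	/* Runs late, with interrupts disabled: quiesce the hardware. */
	st->saved_ctl = 0;	/* read and stash device state here */
	return 0;
}

static void mydev_syscore_resume(void *data)
{
	struct mydev_state *st = data;

	/* Mirror of suspend: restore what was stashed. */
	(void)st;
}

static const struct syscore_ops mydev_syscore_ops = {
	.suspend = mydev_syscore_suspend,
	.resume  = mydev_syscore_resume,
};

static struct syscore mydev_syscore = {
	.ops  = &mydev_syscore_ops,
	.data = &mydev_state,
};

static int __init mydev_init(void)
{
	register_syscore(&mydev_syscore);
	return 0;
}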
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 5c7fac80611c..2756870615cc 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE
config DM_KUNIT_TEST
tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
config DRIVER_PE_KUNIT_TEST
tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
index ea05b8785743..6355a2231b74 100644
--- a/drivers/base/test/platform-device-test.c
+++ b/drivers/base/test/platform-device-test.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/platform_device.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
@@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = {
.test_cases = platform_device_devm_tests,
};
-kunit_test_suite(platform_device_devm_test_suite);
+static void platform_device_find_by_null_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = kunit_platform_device_add(test, pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL);
+
+ KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL));
+}
+
+static struct kunit_case platform_device_match_tests[] = {
+ KUNIT_CASE(platform_device_find_by_null_test),
+ {}
+};
+
+static struct kunit_suite platform_device_match_test_suite = {
+ .name = "platform-device-match",
+ .test_cases = platform_device_match_tests,
+};
+
+kunit_test_suites(
+ &platform_device_devm_test_suite,
+ &platform_device_match_test_suite,
+);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 89f98be5c5b9..c890e2a5b428 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,23 +23,39 @@ static ssize_t name##_show(struct device *dev, \
#define define_siblings_read_func(name, mask) \
static ssize_t name##_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
define_id_show_func(physical_package_id, "%d");
@@ -62,50 +78,50 @@ define_id_show_func(ppin, "0x%llx");
static DEVICE_ATTR_ADMIN_RO(ppin);
define_siblings_read_func(thread_siblings, sibling_cpumask);
-static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
-static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
-static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
-static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
-static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
define_siblings_read_func(package_cpus, core_cpumask);
-static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
-static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
-static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_core_cpus,
&bin_attr_core_cpus_list,
&bin_attr_thread_siblings,
@@ -192,3 +208,55 @@ static int __init topology_sysfs_init(void)
}
device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+ cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
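The per-CPU cpu_scale and its sysfs attribute added above are fed by architecture code through topology_set_cpu_scale(). A sketch of such a producer, assuming a hypothetical my_arch_read_capacity() helper that yields a value scaled to SCHED_CAPACITY_SCALE:

static int __init my_arch_init_cpu_capacity(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		topology_set_cpu_scale(cpu, my_arch_read_capacity(cpu));

	return 0;
}
early_initcall(my_arch_init_cpu_capacity);

Userspace then reads the result from /sys/devices/system/cpu/cpuN/cpu_capacity.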
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
index 3192e18f877e..3b83b13a57ff 100644
--- a/drivers/base/trace.h
+++ b/drivers/base/trace.h
@@ -24,18 +24,18 @@ DECLARE_EVENT_CLASS(devres,
__field(struct device *, dev)
__field(const char *, op)
__field(void *, node)
- __field(const char *, name)
+ __string(name, name)
__field(size_t, size)
),
TP_fast_assign(
- __assign_str(devname, dev_name(dev));
+ __assign_str(devname);
__entry->op = op;
__entry->node = node;
- __entry->name = name;
+ __assign_str(name);
__entry->size = size;
),
TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
- __entry->op, __entry->node, __entry->name, __entry->size)
+ __entry->op, __entry->node, __get_str(name), __entry->size)
);
DEFINE_EVENT(devres, devres_log,
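The conversion above moves the devres name from a raw const char * field to a __string() entry, so the string contents are copied into the ring buffer instead of storing a pointer that may dangle. A minimal sketch of the idiom (event and argument names are illustrative); on kernels where this change applies, __assign_str() takes only the field name and picks up the source from the __string() declaration:

TRACE_EVENT(my_event,
	TP_PROTO(const char *name, size_t size),
	TP_ARGS(name, size),
	TP_STRUCT__entry(
		__string(name, name)
		__field(size_t, size)
	),
	TP_fast_assign(
		__assign_str(name);
		__entry->size = size;
	),
	TP_printk("%s (%zu bytes)", __get_str(name), __entry->size)
);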
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f90bac6bb09..658c7e2ac8bf 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -26,12 +26,14 @@ static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}
-static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
- int value)
+static int bcma_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcma_drv_cc *cc = gpiochip_get_data(chip);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index ed3be52ab63d..8540052d37c5 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -334,7 +334,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
}
/* If the root port is capable of returning Config Request
- * Retry Status (CRS) Completion Status to software then
+ * Retry Status (RRS) Completion Status to software then
* enable the feature.
*/
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
@@ -348,10 +348,10 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
NULL);
root_cap = cap_ptr + PCI_EXP_RTCAP;
bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
- if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
- /* Enable CRS software visibility */
+ if (val16 & BCMA_CORE_PCI_RC_RRS_VISIBILITY) {
+ /* Enable Configuration RRS Software Visibility */
root_ctrl = cap_ptr + PCI_EXP_RTCTL;
- val16 = PCI_EXP_RTCTL_CRSSVE;
+ val16 = PCI_EXP_RTCTL_RRS_SVE;
bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
sizeof(u16));
@@ -360,7 +360,7 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
* 100 ms wait time from the end of Reset. If the device is
* not done with its internal initialization, it must at
* least return a completion TLP, with a completion status
- * of "Configuration Request Retry Status (CRS)". The root
+ * of "Configuration Request Retry Status (RRS)". The root
* complex must complete the request to the host by returning
* a read-data value of 0001h for the Vendor ID field and
* all 1s for any additional bytes included in the request.
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 8ae0b918e740..20b1816c570b 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -261,7 +261,7 @@ static struct platform_driver bcma_host_soc_driver = {
.of_match_table = bcma_host_soc_of_match,
},
.probe = bcma_host_soc_probe,
- .remove_new = bcma_host_soc_remove,
+ .remove = bcma_host_soc_remove,
};
int __init bcma_host_soc_register_driver(void)
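The .remove_new to .remove rename is part of the tree-wide conversion under which a platform driver's remove callback takes the device and returns void, so errors can no longer be reported to the driver core at remove time. A sketch of the resulting shape, with hypothetical names:

static void mydrv_remove(struct platform_device *pdev)
{
	/* Tear down unconditionally; there is no error return. */
}

static struct platform_driver mydrv_driver = {
	.driver = {
		.name = "mydrv",
	},
	.probe  = mydrv_probe,
	.remove = mydrv_remove,
};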
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6b5d34919c72..72f045e6ed51 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -26,7 +26,7 @@ static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
-static int bcma_bus_match(struct device *dev, struct device_driver *drv);
+static int bcma_bus_match(struct device *dev, const struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
+ struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
+ np = core->dev.of_node;
+ if (np && !of_device_is_available(np))
+ continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
@@ -584,10 +590,10 @@ void bcma_driver_unregister(struct bcma_driver *drv)
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
-static int bcma_bus_match(struct device *dev, struct device_driver *drv)
+static int bcma_bus_match(struct device *dev, const struct device_driver *drv)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
- struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ const struct bcma_driver *adrv = container_of_const(drv, struct bcma_driver, drv);
const struct bcma_device_id *cid = &core->id;
const struct bcma_device_id *did;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 5b9d4aaebb81..77d694448990 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ menuconfig BLK_DEV
if BLK_DEV
source "drivers/block/null_blk/Kconfig"
+source "drivers/block/rnull/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -130,7 +131,7 @@ config BLK_DEV_UBD_SYNC
kernel command line option. Alternatively, you can say Y here to
turn on synchronous operation by default for all block devices.
- If you're running a journalling file system (like reiserfs, for
+ If you're running a journalling file system (like xfs, for
example) in your virtual machine, you will want to say Y here. If
you care for the safety of the data in your virtual machine, Y is a
wise choice too. In all other cases (for example, if you're just
@@ -256,49 +257,6 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media (DEPRECATED)"
- depends on !UML
- depends on SCSI
- select CDROM
- help
- Note: This driver is deprecated and will be removed from the
- kernel in the near future!
-
- If you have a CDROM/DVD drive that supports packet writing, say
- Y to include support. It should work with any MMC/Mt Fuji
- compliant ATAPI or SCSI drive, which is just about any newer
- DVD/CD writer.
-
- Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
- is possible.
- DVD-RW disks must be in restricted overwrite mode.
-
- See the file <file:Documentation/cdrom/packet-writing.rst>
- for further information on the use of this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called pktcdvd.
-
-config CDROM_PKTCDVD_BUFFERS
- int "Free buffers for data gathering"
- depends on CDROM_PKTCDVD
- default "8"
- help
- This controls the maximum number of active concurrent packets. More
- concurrent packets can increase write performance, but also require
- more memory. Each concurrent packet will require approximately 64Kb
- of non-swappable kernel memory, memory which will be allocated when
- a disc is opened for writing.
-
-config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching"
- depends on CDROM_PKTCDVD
- help
- If enabled, write caching will be set for the CD-R/W device. For now
- this option is dangerous unless the CD-RW media is known good, as we
- don't do deferred write error handling yet.
-
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
@@ -358,7 +316,7 @@ config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
help
@@ -379,12 +337,6 @@ config BLK_DEV_UBLK
definition isn't finalized yet, and might change according to future
requirements, so mark it as experimental for now.
- Say Y if you want to get better performance because task_work_add()
- can be used in IO path for replacing io_uring cmd, which will become
- shared between IO tasks and ubq daemon, meantime task_work_add() can
- can handle batch more effectively, but task_work_add() isn't exported
- for module, so ublk has to be built to kernel.
-
config BLKDEV_UBLK_LEGACY_OPCODES
bool "Support legacy command opcode"
depends on BLK_DEV_UBLK
@@ -404,4 +356,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES
source "drivers/block/rnbd/Kconfig"
+config BLK_DEV_ZONED_LOOP
+ tristate "Zoned loopback device support"
+ depends on BLK_DEV_ZONED
+ help
+ Saying Y here will allow you to create a zoned block device using
+ regular files for zones (one file per zone). This is useful to test
+ file systems, device mapper and applications that support zoned block
+ devices. No user utility is needed to create a zoned loop device: a
+ zoned loop device can be created (or re-started) using a command
+ like:
+
+ echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \
+ /dev/zloop-control
+
+ See Documentation/admin-guide/blockdev/zoned_loop.rst for usage
+ details.
+
+ If unsure, say N.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 101612cba303..2d8096eb8cdf 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -36,7 +35,9 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a25414228e47..2932b6653b6f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -232,6 +232,7 @@ static DEFINE_MUTEX(amiflop_mutex);
static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */
module_param(fd_def_df0, ulong, 0);
+MODULE_DESCRIPTION("Amiga floppy driver");
MODULE_LICENSE("GPL");
/*
@@ -456,7 +457,7 @@ static int fd_motor_on(int nr)
{
nr &= 3;
- del_timer(motor_off_timer + nr);
+ timer_delete(motor_off_timer + nr);
if (!unit[nr].motor) {
unit[nr].motor = 1;
@@ -1392,7 +1393,7 @@ static int non_int_flush_track (unsigned long nr)
nr&=3;
writefromint = 0;
- del_timer(&post_write_timer);
+ timer_delete(&post_write_timer);
get_fdc(nr);
if (!fd_motor_on(nr)) {
writepending = 0;
@@ -1434,7 +1435,7 @@ static int get_track(int drive, int track)
}
if (unit[drive].dirty == 1) {
- del_timer (flush_track_timer + drive);
+ timer_delete(flush_track_timer + drive);
non_int_flush_track (drive);
}
errcnt = 0;
@@ -1522,13 +1523,13 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = MINOR(bdev->bd_dev) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
- geo->heads = unit[drive].type->heads;
- geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- geo->cylinders = unit[drive].type->tracks;
+ geo->heads = p->type->heads;
+ geo->sectors = p->dtype->sects * p->type->sect_mult;
+ geo->cylinders = p->type->tracks;
return 0;
}
@@ -1590,7 +1591,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
case FDDEFPRM:
return -EINVAL;
case FDFLUSH: /* unconditionally, even if not needed */
- del_timer (flush_track_timer + drive);
+ timer_delete(flush_track_timer + drive);
non_int_flush_track(drive);
break;
#ifdef RAW_IOCTL
@@ -1713,7 +1714,7 @@ static void floppy_release(struct gendisk *disk)
mutex_lock(&amiflop_mutex);
if (unit[drive].dirty == 1) {
- del_timer (flush_track_timer + drive);
+ timer_delete(flush_track_timer + drive);
non_int_flush_track (drive);
}
@@ -1776,10 +1777,13 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
@@ -1815,7 +1819,6 @@ static int fd_alloc_drive(int drive)
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
- unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 749ae1246f4c..d35caa3c69e1 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -80,6 +80,7 @@ enum {
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
DEVFL_FREED = (1<<8), /* device has been cleaned up */
+ DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */
};
enum {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index b6dac8cee70f..34ead75e7e02 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -269,9 +269,9 @@ static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
}
static int
-aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+aoeblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
@@ -337,6 +337,7 @@ aoeblk_gdalloc(void *vp)
struct queue_limits lim = {
.max_hw_sectors = aoe_maxsectors,
.io_opt = SZ_2M,
+ .features = BLK_FEAT_ROTATIONAL,
};
ulong flags;
int late = 0;
@@ -367,7 +368,6 @@ aoeblk_gdalloc(void *vp)
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index cc9077b588d7..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -14,7 +14,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"
@@ -361,6 +361,7 @@ ata_rw_frameinit(struct frame *f)
}
ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
}
@@ -401,6 +402,8 @@ aoecmd_ata_rw(struct aoedev *d)
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
+ } else {
+ dev_put(f->t->ifp->nd);
}
return 1;
}
@@ -483,10 +486,13 @@ resend(struct aoedev *d, struct frame *f)
memcpy(h->dst, t->addr, sizeof h->dst);
memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
+ dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb == NULL)
+ if (skb == NULL) {
+ dev_put(t->ifp->nd);
return;
+ }
f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
@@ -617,6 +623,8 @@ probe(struct aoetgt *t)
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
+ } else {
+ dev_put(f->t->ifp->nd);
}
}
@@ -737,7 +745,7 @@ rexmit_timer(struct timer_list *timer)
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
- d = from_timer(d, timer, timer);
+ d = timer_container_of(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
@@ -746,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
utgts = count_targets(d, NULL);
- if (d->flags & DEVFL_TKILL) {
+ if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
spin_unlock_irqrestore(&d->lock, flags);
return;
}
@@ -778,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
- aoedev_downdev(d);
+ d->flags |= DEVFL_DEAD;
+ queue_work(aoe_wq, &d->work);
goto out;
}
@@ -890,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
+ if (d->flags & DEVFL_DEAD)
+ aoedev_downdev(d);
+
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
@@ -1395,6 +1407,7 @@ aoecmd_ata_id(struct aoedev *d)
ah->cmdstat = ATA_CMD_ID_ATA;
ah->lba3 = 0xa0;
+ dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
d->rttavg = RTTAVG_INIT;
@@ -1404,6 +1417,8 @@ aoecmd_ata_id(struct aoedev *d)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb)
f->sent = ktime_get();
+ else
+ dev_put(t->ifp->nd);
return skb;
}
@@ -1746,6 +1761,6 @@ aoecmd_exit(void)
kfree(kts);
kfree(ktiowq);
- free_page((unsigned long) page_address(empty_page));
+ __free_page(empty_page);
empty_page = NULL;
}
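All of the dev_hold()/dev_put() additions in aoecmd.c follow one pattern: take a reference on the net_device before publishing it through skb->dev, then drop it on every path where no skb is actually handed to the network stack (failed clone, failed transmit setup). Condensed from the resend() hunk above:

	dev_hold(t->ifp->nd);		/* ref travels with the skb */
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL) {
		dev_put(t->ifp->nd);	/* nothing will consume the ref */
		return;
	}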
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 3523dd82d7a0..3a240755045b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -149,7 +149,7 @@ dummy_timer(struct timer_list *t)
{
struct aoedev *d;
- d = from_timer(d, t, timer);
+ d = timer_container_of(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
@@ -198,9 +198,13 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
+ struct request *rq, *rqnext;
int i;
+ unsigned long flags;
- d->flags &= ~DEVFL_UP;
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
+ spin_unlock_irqrestore(&d->lock, flags);
/* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
@@ -223,13 +227,21 @@ aoedev_downdev(struct aoedev *d)
/* clean out the in-process request (if any) */
aoe_failip(d);
+ /* clean out any queued block requests */
+ list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+ }
+
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to ensure all are errored */
- blk_mq_freeze_queue(d->blkq);
+ unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
- blk_mq_unfreeze_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq, memflags);
}
if (d->gd)
@@ -273,7 +285,7 @@ freedev(struct aoedev *d)
if (!freeing)
return;
- del_timer_sync(&d->timer);
+ timer_delete_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 6238c4c87cfc..3b21750038ee 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -28,7 +28,7 @@ static void discover_timer(struct timer_list *t)
static void __exit
aoe_exit(void)
{
- del_timer_sync(&timer);
+ timer_delete_sync(&timer);
aoenet_exit();
unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
@@ -44,7 +44,7 @@ aoe_init(void)
{
int ret;
- aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
if (!aoe_wq)
return -ENOMEM;
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 923a134fd766..66e617664c14 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -10,7 +10,7 @@
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "aoe.h"
#define NECODES 5
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index cacc4ba942a8..7fe14266c12c 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -494,7 +494,7 @@ static inline void start_timeout(void)
static inline void stop_timeout(void)
{
- del_timer(&timeout_timer);
+ timer_delete(&timeout_timer);
}
/* Select the side to use. */
@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ unsigned int memflags;
int ret;
if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
q = unit[drive].disk[type]->queue;
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
local_irq_save(flags);
@@ -783,7 +784,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
contents become invalid! */
BufferDrive = -1;
/* stop deselect timer */
- del_timer( &motor_off_timer );
+ timer_delete(&motor_off_timer);
FILL( 60 * (nsect / 9), 0x4e );
for( sect = 0; sect < nsect; ++sect ) {
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
@@ -1137,7 +1138,7 @@ static void fd_rwsec_done( int status )
DPRINT(("fd_rwsec_done()\n"));
if (read_track) {
- del_timer(&readtrack_timer);
+ timer_delete(&readtrack_timer);
if (!MultReadInProgress)
return;
MultReadInProgress = 0;
@@ -1355,7 +1356,7 @@ static void fd_times_out(struct timer_list *unused)
/* If the timeout occurred while the readtrack_check timer was
* active, we need to cancel it, else bad things will happen */
if (UseTrackbuffer)
- del_timer( &readtrack_timer );
+ timer_delete(&readtrack_timer);
FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
udelay( 25 );
@@ -1565,7 +1566,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
/* stop deselect timer */
- del_timer( &motor_off_timer );
+ timer_delete(&motor_off_timer);
ReqCnt = 0;
ReqCmd = rq_data_dir(fd_request);
@@ -1992,9 +1993,12 @@ static const struct blk_mq_ops ataflop_mq_ops = {
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
@@ -2051,7 +2055,7 @@ static void atari_floppy_cleanup(void)
blk_mq_free_tag_set(&unit[i].tag_set);
}
- del_timer_sync(&fd_timer);
+ timer_delete_sync(&fd_timer);
atari_stram_free(DMABuffer);
}
@@ -2085,7 +2089,6 @@ static int __init atari_floppy_init (void)
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
- unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
@@ -2197,4 +2200,5 @@ static void __exit atari_floppy_exit(void)
module_init(atari_floppy_init)
module_exit(atari_floppy_exit)
+MODULE_DESCRIPTION("Atari floppy driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b900fe9e0030..9778259b30d4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -44,42 +44,74 @@ struct brd_device {
};
/*
- * Look up and return a brd's page for a given sector.
+ * Look up and return a brd's page for a given sector, with a reference held.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+ struct page *page;
+ XA_STATE(xas, &brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+
+ rcu_read_lock();
+repeat:
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (!page)
+ goto out;
+
+ if (!get_page_unless_zero(page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
+ xas_reset(&xas);
+ goto repeat;
+ }
+out:
+ rcu_read_unlock();
+
+ return page;
}
/*
* Insert a new page for a given sector, if one does not already exist.
+ * A reference is held on the returned page.
*/
-static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
+ blk_opf_t opf)
{
- pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
- struct page *page;
- int ret = 0;
-
- page = brd_lookup_page(brd, sector);
- if (page)
- return 0;
+ gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
+ struct page *page, *ret;
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
if (!page)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
xa_lock(&brd->brd_pages);
- ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
- if (!ret)
+ ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
+ page, gfp);
+ if (!ret) {
brd->brd_nr_pages++;
- xa_unlock(&brd->brd_pages);
+ get_page(page);
+ xa_unlock(&brd->brd_pages);
+ return page;
+ }
- if (ret < 0) {
- __free_page(page);
- if (ret == -EBUSY)
- ret = 0;
+ if (!xa_is_err(ret)) {
+ get_page(ret);
+ xa_unlock(&brd->brd_pages);
+ put_page(page);
+ return ret;
}
- return ret;
+
+ xa_unlock(&brd->brd_pages);
+ put_page(page);
+ return ERR_PTR(xa_err(ret));
}
/*
@@ -92,7 +124,7 @@ static void brd_free_pages(struct brd_device *brd)
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
- __free_page(page);
+ put_page(page);
cond_resched();
}
@@ -100,156 +132,89 @@ static void brd_free_pages(struct brd_device *brd)
}
/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+ * Process a single segment. The segment is capped to not cross page boundaries
+ * in both the bio and the brd backing memory.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
-/*
- * Copy n bytes from src to the brd starting at sector. Does not sleep.
- */
-static void copy_to_brd(struct brd_device *brd, const void *src,
- sector_t sector, size_t n)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
struct page *page;
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
+ void *kaddr;
+
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
- copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
- kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
+ if (!page && op_is_write(opf)) {
+ page = brd_insert_page(brd, sector, opf);
+ if (IS_ERR(page))
+ goto out_error;
}
-}
-/*
- * Copy n bytes to dst from the brd starting at sector. Does not sleep.
- */
-static void copy_from_brd(void *dst, struct brd_device *brd,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
+ kaddr = bvec_kmap_local(&bv);
+ if (op_is_write(opf)) {
+ memcpy_to_page(page, offset, kaddr, bv.bv_len);
+ } else {
+ if (page)
+ memcpy_from_page(kaddr, page, offset, bv.bv_len);
+ else
+ memset(kaddr, 0, bv.bv_len);
}
+ kunmap_local(kaddr);
+
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ if (page)
+ put_page(page);
+ return true;
+
+out_error:
+ if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
}
-/*
- * Process a single bvec of a bio.
- */
-static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, blk_opf_t opf,
- sector_t sector)
+static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
- void *mem;
- int err = 0;
-
- if (op_is_write(opf)) {
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
+ sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
+ sector_t aligned_end = round_down(
+ sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
+ struct page *page;
- err = copy_to_brd_setup(brd, sector, len, gfp);
- if (err)
- goto out;
- }
+ if (aligned_end <= aligned_sector)
+ return;
- mem = kmap_atomic(page);
- if (!op_is_write(opf)) {
- copy_from_brd(mem + off, brd, sector, len);
- flush_dcache_page(page);
- } else {
- flush_dcache_page(page);
- copy_to_brd(brd, mem + off, sector, len);
+ xa_lock(&brd->brd_pages);
+ while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
+ page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
+ if (page) {
+ put_page(page);
+ brd->brd_nr_pages--;
+ }
+ aligned_sector += PAGE_SECTORS;
}
- kunmap_atomic(mem);
-
-out:
- return err;
+ xa_unlock(&brd->brd_pages);
}
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
-
- bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
- int err;
-
- /* Don't support un-aligned buffer */
- WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
- (len & (SECTOR_SIZE - 1)));
-
- err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio->bi_opf, sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
- return;
- }
- sector += len >> SECTOR_SHIFT;
+
+ if (unlikely(op_is_discard(bio->bi_opf))) {
+ brd_do_discard(brd, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
+ bio_endio(bio);
+ return;
}
+ do {
+ if (!brd_rw_bvec(brd, bio))
+ return;
+ } while (bio->bi_iter.bi_size);
+
bio_endio(bio);
}
@@ -273,6 +238,7 @@ static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
+MODULE_DESCRIPTION("Ram backed block device driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
@@ -292,8 +258,40 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
+static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
+static struct brd_device *brd_find_or_alloc_device(int i)
+{
+ struct brd_device *brd;
+
+ mutex_lock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ if (brd->brd_number == i) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
+ if (!brd) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+ brd->brd_number = i;
+ list_add_tail(&brd->brd_list, &brd_devices);
+ mutex_unlock(&brd_devices_mutex);
+ return brd;
+}
+
+static void brd_free_device(struct brd_device *brd)
+{
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
+ kfree(brd);
+}
+
static int brd_alloc(int i)
{
struct brd_device *brd;
@@ -309,16 +307,16 @@ static int brd_alloc(int i)
* is harmless)
*/
.physical_block_size = PAGE_SIZE,
+ .max_hw_discard_sectors = UINT_MAX,
+ .max_discard_segments = 1,
+ .discard_granularity = PAGE_SIZE,
+ .features = BLK_FEAT_SYNCHRONOUS |
+ BLK_FEAT_NOWAIT,
};
- list_for_each_entry(brd, &brd_devices, brd_list)
- if (brd->brd_number == i)
- return -EEXIST;
- brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd)
- return -ENOMEM;
- brd->brd_number = i;
- list_add_tail(&brd->brd_list, &brd_devices);
+ brd = brd_find_or_alloc_device(i);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
xa_init(&brd->brd_pages);
@@ -340,10 +338,6 @@ static int brd_alloc(int i)
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
- /* Tell the block layer that this is not a rotational device */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
@@ -353,8 +347,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
put_disk(disk);
out_free_dev:
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
return err;
}
@@ -373,8 +366,7 @@ static void brd_cleanup(void)
del_gendisk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
}
}
@@ -401,16 +393,6 @@ static int __init brd_init(void)
{
int err, i;
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
- }
-
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -426,11 +408,18 @@ static int __init brd_init(void)
* dynamically.
*/
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
err = -EIO;
goto out_free;
}
+ for (i = 0; i < rd_nr; i++)
+ brd_alloc(i);
+
pr_info("brd: module loaded\n");
return 0;
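The reworked brd_lookup_page() above is an instance of the standard lockless lookup for refcounted objects in an XArray: load under RCU, try to take a reference, and re-validate that the slot still points at the same object. In generic form (a sketch, not brd code):

static struct page *lookup_page_get(struct xarray *xa, pgoff_t idx)
{
	struct page *page;
	XA_STATE(xas, xa, idx);

	rcu_read_lock();
repeat:
	page = xas_load(&xas);
	if (xas_retry(&xas, page)) {
		xas_reset(&xas);
		goto repeat;
	}
	if (page && !get_page_unless_zero(page)) {
		/* Page was mid-free; start over. */
		xas_reset(&xas);
		goto repeat;
	}
	if (page && page != xas_reload(&xas)) {
		/* Slot changed after we took the ref; drop it and retry. */
		put_page(page);
		xas_reset(&xas);
		goto repeat;
	}
	rcu_read_unlock();
	return page;
}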
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 6fb4e38fca88..495a72da04c6 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -10,7 +10,7 @@ config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
depends on PROC_FS && INET
select LRU_CACHE
- select LIBCRC32C
+ select CRC32
help
NOTE: In order to authenticate connections you have to select
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 85ca000a0564..d90fa3e7f4cf 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1210,7 +1210,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
return err;
}
-/**
+/*
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @device: DRBD device.
*/
@@ -1221,7 +1221,7 @@ int drbd_bm_read(struct drbd_device *device,
return bm_rw(device, BM_AIO_READ, 0);
}
-/**
+/*
* drbd_bm_write() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1233,7 +1233,7 @@ int drbd_bm_write(struct drbd_device *device,
return bm_rw(device, 0, 0);
}
-/**
+/*
* drbd_bm_write_all() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1255,7 +1255,7 @@ int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_ho
return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}
-/**
+/*
* drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1272,7 +1272,7 @@ int drbd_bm_write_copy_pages(struct drbd_device *device,
return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
-/**
+/*
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
* @device: DRBD device.
*/
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 94dc0a235919..f6d6276974ee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -297,10 +297,6 @@ struct drbd_epoch {
unsigned long flags;
};
-/* Prototype declaration of function defined in drbd_receiver.c */
-int drbdd_init(struct drbd_thread *);
-int drbd_asender(struct drbd_thread *);
-
/* drbd_epoch flag bits */
enum {
DE_HAVE_BARRIER_NUMBER,
@@ -384,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */
+ __EE_RELEASE_TO_MEMPOOL,
+
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -406,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -862,9 +862,7 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
- struct list_head net_ee; /* zero-copy network send in progress */
- int next_barrier_nr;
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
@@ -1334,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t drbd_pp_lock;
-extern int drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1359,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1369,7 +1350,6 @@ extern struct bio_set drbd_io_bio_set;
extern struct mutex resources_mutex;
-extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);
@@ -1390,9 +1370,6 @@ extern void conn_free_crypto(struct drbd_connection *connection);
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
void drbd_submit_bio(struct bio *bio);
-extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
-extern int is_valid_ar_handle(struct drbd_request *, sector_t);
-
/* drbd_nl.c */
@@ -1474,7 +1451,6 @@ extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
-extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
@@ -1488,7 +1464,6 @@ extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
sector_t start, unsigned int nr_sectors, int flags);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
-extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
@@ -1499,12 +1474,8 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
- int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
-extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1622,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
- struct page *page = peer_req->pages;
- page_chain_for_each(page) {
- if (page_count(page) > 1)
- return 1;
- }
- return 0;
-}
-
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 113b441d4d36..c73376886e7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
-/* I do not use a standard mempool, because:
- 1) I want to hand out the pre-allocated objects first.
- 2) I want to be able to interrupt sleeping allocation with a signal.
- Note: This is a single linked list, the next pointer is the private
- member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -471,20 +461,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
-int conn_lowest_minor(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr = 0, minor = -1;
-
- rcu_read_lock();
- peer_device = idr_get_next(&connection->peer_devices, &vnr);
- if (peer_device)
- minor = device_to_minor(peer_device->device);
- rcu_read_unlock();
-
- return minor;
-}
-
#ifdef CONFIG_SMP
/*
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
@@ -1550,7 +1526,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (!drbd_disable_sendpage && sendpage_ok(page))
+ if (!drbd_disable_sendpage && sendpages_ok(page, len, offset))
msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;
drbd_update_congested(peer_device->connection);
@@ -1625,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1633,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ if (likely(use_sendpage))
+ err = _drbd_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ else
+ err = _drbd_no_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+
if (err)
return err;
len -= l;
@@ -1976,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
- INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2057,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
- D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2069,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
static void drbd_destroy_mempools(void)
{
- struct page *page;
-
- while (drbd_pp_pool) {
- page = drbd_pp_pool;
- drbd_pp_pool = (struct page *)page_private(page);
- __free_page(page);
- drbd_pp_vacant--;
- }
-
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2100,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
- struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i, ret;
+ int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2139,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+ if (ret)
+ goto Enomem;
+
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
@@ -2148,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
- for (i = 0; i < number; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto Enomem;
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- }
- drbd_pp_vacant = number;
-
return 0;
Enomem:
@@ -2183,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
-
- rr = drbd_free_peer_reqs(device, &device->net_ee);
- if (rr)
- drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2697,6 +2659,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
* connect.
*/
.max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8,
+ .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
+ BLK_FEAT_ROTATIONAL |
+ BLK_FEAT_STABLE_WRITES,
};
device = minor_to_device(minor);
@@ -2735,9 +2700,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
- blk_queue_write_cache(disk->queue, true, true);
-
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
goto out_no_io_page;
@@ -2877,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
- /*
- * allocate all necessary structs
- */
- init_waitqueue_head(&drbd_pp_wait);
-
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
@@ -3048,7 +3005,7 @@ void drbd_md_sync(struct drbd_device *device)
BUILD_BUG_ON(UI_SIZE != 4);
BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
- del_timer(&device->md_sync_timer);
+ timer_delete(&device->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
if (!test_and_clear_bit(MD_DIRTY, &device->flags))
return;
@@ -3399,10 +3356,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
+ }
- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
@@ -3422,6 +3381,7 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
/**
* drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
* @device: DRBD device.
+ * @peer_device: Peer DRBD device.
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
@@ -3448,6 +3408,7 @@ int drbd_bmio_set_n_write(struct drbd_device *device,
/**
* drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
* @device: DRBD device.
+ * @peer_device: Peer DRBD device.
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
@@ -3501,6 +3462,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
* @done: callback to be called after the bitmap IO was performed
* @why: Descriptive text of the reason for doing the IO
* @flags: Bitmap flags
+ * @peer_device: Peer DRBD device.
*
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
@@ -3549,6 +3511,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
* @io_fn: IO callback to be called when bitmap IO is possible
* @why: Descriptive text of the reason for doing the IO
* @flags: Bitmap flags
+ * @peer_device: Peer DRBD device.
*
 * freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
@@ -3599,7 +3562,8 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
static void md_sync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, md_sync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ md_sync_timer);
drbd_device_post_work(device, MD_SYNC);
}
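The from_timer() to timer_container_of() conversions in this series are a pure rename of the same container_of() wrapper. A sketch, assuming the macro mirrors the old from_timer() definition:

#define timer_container_of(var, callback_timer, timer_fieldname) \
	container_of(callback_timer, typeof(*var), timer_fieldname)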
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 5d65c9754d83..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -25,7 +25,7 @@
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>
@@ -1033,7 +1033,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
/* We do some synchronous IO below, which may take some time.
 * Clear the timer to avoid scary "timer expired!" messages;
 * the "Superblock" is written out at least twice below, anyway. */
- del_timer(&device->md_sync_timer);
+ timer_delete(&device->md_sync_timer);
/* We won't change the "al-extents" setting, we just may need
* to move the on-disk location of the activity log ringbuffer.
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 0c9f54197768..3de919b6f0e1 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
+#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
- struct page *page;
- struct page *tmp;
-
- BUG_ON(!n);
- BUG_ON(!head);
-
- page = *head;
-
- if (!page)
- return NULL;
-
- while (page) {
- tmp = page_chain_next(page);
- if (--n == 0)
- break; /* found sufficient pages */
- if (tmp == NULL)
- /* insufficient pages, don't use any of them. */
- return NULL;
- page = tmp;
- }
-
- /* add end of list marker for the returned list */
- set_page_private(page, 0);
- /* actual return value, and adjustment of head */
- page = *head;
- *head = tmp;
- return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
- struct page *tmp;
- int i = 1;
- while ((tmp = page_chain_next(page))) {
- ++i;
- page = tmp;
- }
- if (len)
- *len = i;
- return page;
-}
-
-static int page_chain_free(struct page *page)
-{
- struct page *tmp;
- int i = 0;
- page_chain_for_each_safe(page, tmp) {
- put_page(page);
- ++i;
- }
- return i;
-}
-
-static void page_chain_add(struct page **head,
- struct page *chain_first, struct page *chain_last)
-{
-#if 1
- struct page *tmp;
- tmp = page_chain_tail(chain_first, NULL);
- BUG_ON(tmp != chain_last);
-#endif
-
- /* add chain to head */
- set_page_private(chain_last, (unsigned long)*head);
- *head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
- unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
- /* Yes, testing drbd_pp_vacant outside the lock is racy.
- * So what. It saves a spin_lock. */
- if (drbd_pp_vacant >= number) {
- spin_lock(&drbd_pp_lock);
- page = page_chain_del(&drbd_pp_pool, number);
- if (page)
- drbd_pp_vacant -= number;
- spin_unlock(&drbd_pp_lock);
- if (page)
- return page;
- }
-
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
- tmp = alloc_page(GFP_TRY);
+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
- break;
+ goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
-
- if (i == number)
- return page;
-
- /* Not enough pages immediately available this time.
- * No need to jump around here, drbd_alloc_pages will retry this
- * function "soon". */
- if (page) {
- tmp = page_chain_tail(page, NULL);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
+ return page;
+fail:
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
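The rewrite above drops the hand-rolled drbd_pp_pool page chain in favor of a standard page-pool mempool. A minimal sketch of that pattern, with illustrative names:

static mempool_t example_pool;

static int example_pool_init(unsigned int min_nr)
{
	/* pre-allocate min_nr order-0 pages as an emergency reserve */
	return mempool_init_page_pool(&example_pool, min_nr, 0);
}

static void example_pool_use(void)
{
	struct page *page = mempool_alloc(&example_pool, GFP_NOIO);

	if (page)
		mempool_free(page, &example_pool);
}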
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
- struct list_head *to_be_freed)
-{
- struct drbd_peer_request *peer_req, *tmp;
-
- /* The EEs are always appended to the end of the list. Since
- they are sent in order over the wire, they have to finish
- in order. As soon as we see the first not finished we can
- stop to examine the list... */
-
- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
- if (drbd_peer_req_has_active_page(peer_req))
- break;
- list_move(&peer_req->w.list, to_be_freed);
- }
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
- LIST_HEAD(reclaimed);
- struct drbd_peer_request *peer_req, *t;
-
- spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
- spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
- struct drbd_device *device = peer_device->device;
- if (!atomic_read(&device->pp_in_use_by_net))
- continue;
-
- kref_get(&device->kref);
- rcu_read_unlock();
- drbd_reclaim_net_peer_reqs(device);
- kref_put(&device->kref, drbd_destroy_device);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
- struct page *page = NULL;
+ struct page *page;
struct net_conf *nc;
- DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&device->pp_in_use) < mxb)
- page = __drbd_alloc_pages(device, number);
-
- /* Try to keep the fast path fast, but occasionally we need
- * to reclaim the pages we lended to the network stack. */
- if (page && atomic_read(&device->pp_in_use_by_net) > 512)
- drbd_reclaim_net_peer_reqs(device);
-
- while (page == NULL) {
- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
- drbd_reclaim_net_peer_reqs(device);
-
- if (atomic_read(&device->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(device, number);
- if (page)
- break;
- }
-
- if (!retry)
- break;
-
- if (signal_pending(current)) {
- drbd_warn(device, "drbd_alloc_pages interrupted!\n");
- break;
- }
-
- if (schedule_timeout(HZ/10) == 0)
- mxb = UINT_MAX;
- }
- finish_wait(&drbd_pp_wait, &wait);
+ if (atomic_read(&device->pp_in_use) >= mxb)
+ schedule_timeout_interruptible(HZ / 10);
+ page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
* Is also used from inside an other spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
- atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
- int i;
+ struct page *tmp;
+ int i = 0;
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
- i = page_chain_free(page);
- else {
- struct page *tmp;
- tmp = page_chain_tail(page, &i);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
- }
- i = atomic_sub_return(i, a);
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ if (page_count(page) == 1)
+ mempool_free(page, &drbd_buffer_page_pool);
+ else
+ put_page(page);
+ i++;
+ }
+ i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
- is_net ? "pp_in_use_by_net" : "pp_in_use", i);
- wake_up(&drbd_pp_wait);
+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
+ if (!mempool_is_saturated(&drbd_buffer_page_pool))
+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
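mempool_is_saturated() reports that the pool's reserve is full, so a later free would fall through to the page allocator; requests allocated while the reserve is depleted are tagged EE_RELEASE_TO_MEMPOOL so their pages refill the pool on release. A sketch of the test, assuming it reads the usual mempool bookkeeping fields:

static inline bool mempool_is_saturated_sketch(mempool_t *pool)
{
	return READ_ONCE(pool->curr_nr) >= pool->min_nr;
}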
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
- int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(device, peer_req->pages, is_net);
+ drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(device, peer_req, is_net);
+ drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
- LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -638,7 +450,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
* a free one dynamically.
*/
what = "bind before connect";
- err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr_unsized *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -646,7 +458,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr_unsized *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -725,7 +537,7 @@ static int prepare_listen_socket(struct drbd_connection *connection, struct acce
drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr_unsized *)&my_addr, my_addr_len);
if (err < 0)
goto out;
@@ -1924,13 +1736,13 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
- data = kmap(page);
+ data = kmap_local_page(page);
err = drbd_recv_all_warn(peer_device->connection, data, len);
if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
drbd_err(device, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
- kunmap(page);
+ kunmap_local(data);
if (err) {
drbd_free_peer_req(device, peer_req);
return NULL;
@@ -1965,7 +1777,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
page = drbd_alloc_pages(peer_device, 1, 1);
- data = kmap(page);
+ data = kmap_local_page(page);
while (data_size) {
unsigned int len = min_t(int, data_size, PAGE_SIZE);
@@ -1974,8 +1786,8 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
break;
data_size -= len;
}
- kunmap(page);
- drbd_free_pages(peer_device->device, page, 0);
+ kunmap_local(data);
+ drbd_free_pages(peer_device->device, page);
return err;
}
@@ -2500,7 +2312,11 @@ static int handle_write_conflicts(struct drbd_device *device,
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
list_add_tail(&peer_req->w.list, &device->done_ee);
- queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
+ /* put is in drbd_send_acks_wf() */
+ kref_get(&device->kref);
+ if (!queue_work(connection->ack_sender,
+ &peer_req->peer_device->send_acks_work))
+ kref_put(&device->kref, drbd_destroy_device);
err = -ENOENT;
goto out;
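The kref_get()/queue_work() pairing guards against queue_work() returning false when the work item is already pending; on that path the reference taken on behalf of the worker must be dropped at once. A generic sketch of the pattern (the helper name is illustrative):

static void queue_work_with_ref(struct workqueue_struct *wq,
				struct work_struct *work, struct kref *ref,
				void (*release)(struct kref *))
{
	kref_get(ref);
	if (!queue_work(wq, work))
		kref_put(ref, release); /* already queued: drop our ref */
	/* otherwise the work function owns the reference and puts it */
}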
@@ -5187,7 +5003,7 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
atomic_set(&device->rs_pending_cnt, 0);
wake_up(&device->misc_wait);
- del_timer_sync(&device->resync_timer);
+ timer_delete_sync(&device->resync_timer);
resync_timer_fn(&device->resync_timer);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
@@ -5220,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
- /* tcp_close and release of sendpage pages can be deferred. I don't
- * want to use SO_LINGER, because apparently it can be deferred for
- * more than 20 seconds (longest time I checked).
- *
- * Actually we don't care for exactly when the network stack does its
- * put_page(), but release our reference on these pages right here.
- */
- i = drbd_free_peer_reqs(device, &device->net_ee);
- if (i)
- drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5976,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
- conn_reclaim_net_peer_reqs(connection);
-
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 380e6584a4ee..d15826f6ee81 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1699,7 +1699,8 @@ static bool net_timeout_reached(struct drbd_request *net_req,
void request_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, request_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ request_timer);
struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
struct net_conf *nc;
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index e858e7e0383f..c2b6c4d9729d 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
ns.disk == D_OUTDATED)
rv = SS_CONNECTED_OUTDATES;
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4352a50fbb3f..dea3e79d044f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -442,7 +442,8 @@ int w_resync_timer(struct drbd_work *w, int cancel)
void resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, resync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ resync_timer);
drbd_queue_work_if_unqueued(
&first_peer_device(device)->connection->sender_work,
@@ -1029,22 +1030,6 @@ out:
return 1;
}
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
- if (drbd_peer_req_has_active_page(peer_req)) {
- /* This might happen if sendpage() has not finished */
- int i = PFN_UP(peer_req->i.size);
- atomic_add(i, &device->pp_in_use_by_net);
- atomic_sub(i, &device->pp_in_use);
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->net_ee);
- spin_unlock_irq(&device->resource->req_lock);
- wake_up(&drbd_pp_wait);
- } else
- drbd_free_peer_req(device, peer_req);
-}
-
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1058,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1073,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1119,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1154,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
-
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1175,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev(device)) {
@@ -1219,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
-
- dec_unacked(device);
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1698,7 +1679,8 @@ void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
void start_resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, start_resync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ start_resync_timer);
drbd_device_post_work(device, RS_START);
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 25c9d85667f1..c28786e0fe1c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -163,35 +163,35 @@
/* do print messages for unexpected interrupts */
static int print_unex = 1;
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/fdreg.h>
-#include <linux/fd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/async.h>
#include <linux/bio.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-#include <linux/fcntl.h>
+#include <linux/compat.h>
#include <linux/delay.h>
-#include <linux/mc146818rtc.h> /* CMOS defines */
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fd.h>
+#include <linux/fdreg.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/major.h>
-#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/mm.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <linux/async.h>
-#include <linux/compat.h>
+#include <linux/workqueue.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -233,8 +233,6 @@ static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
-#define K_64 0x10000 /* 64KB */
-
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
@@ -331,7 +329,7 @@ static bool initialized;
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
-#define MAX_DISK_SIZE 4 /* 3984 */
+#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
/*
* globals used by 'result()'
@@ -937,7 +935,7 @@ static void floppy_off(unsigned int drive)
if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))))
return;
- del_timer(motor_off_timer + drive);
+ timer_delete(motor_off_timer + drive);
/* make spindle stop in a position which minimizes spinup time
* next time */
@@ -1918,7 +1916,7 @@ static int start_motor(void (*function)(void))
mask &= ~(0x10 << UNIT(current_drive));
/* starts motor and selects floppy */
- del_timer(motor_off_timer + current_drive);
+ timer_delete(motor_off_timer + current_drive);
set_dor(current_fdc, mask, data);
/* wait_for_completion also schedules reset if needed. */
@@ -3092,16 +3090,13 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ ptr = memdup_user(param, sizeof(*ptr));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
*rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
- if (ret)
- return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
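memdup_user() folds the kmalloc() and copy_from_user() pair into a single call that returns an ERR_PTR() on failure, which is why the separate -EFAULT path could be dropped. Roughly the open-coded equivalent, as a sketch:

static void *memdup_user_sketch(const void __user *src, size_t len)
{
	void *p = kmalloc(len, GFP_USER);

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;
}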
@@ -3363,9 +3358,9 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
@@ -3411,7 +3406,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
- const void *outparam; /* parameters passed back to user space */
+ const void *outparam = NULL; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
@@ -4516,7 +4511,8 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct queue_limits lim = {
- .max_hw_sectors = 64,
+ .max_hw_sectors = 64,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct gendisk *disk;
@@ -4595,7 +4591,6 @@ static int __init do_floppy_init(void)
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
- tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
@@ -4762,7 +4757,7 @@ out_put_disk:
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive][0])
break;
- del_timer_sync(&motor_off_timer[drive]);
+ timer_delete_sync(&motor_off_timer[drive]);
put_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
}
@@ -4983,7 +4978,7 @@ static void __exit floppy_module_exit(void)
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
- del_timer_sync(&motor_off_timer[drive]);
+ timer_delete_sync(&motor_off_timer[drive]);
if (floppy_available(drive)) {
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
@@ -5016,6 +5011,7 @@ module_param(floppy, charp, 0);
module_param(FLOPPY_IRQ, int, 0);
module_param(FLOPPY_DMA, int, 0);
MODULE_AUTHOR("Alain L. Knaff");
+MODULE_DESCRIPTION("Normal floppy disk support");
MODULE_LICENSE("GPL");
/* This doesn't actually get used other than for module information */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28a95fd366fe..272bc608e528 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -45,8 +45,6 @@ enum {
Lo_deleting,
};
-struct loop_func_table;
-
struct loop_device {
int lo_number;
loff_t lo_offset;
@@ -54,7 +52,8 @@ struct loop_device {
int lo_flags;
char lo_file_name[LO_NAME_SIZE];
- struct file * lo_backing_file;
+ struct file *lo_backing_file;
+ unsigned int lo_min_dio_size;
struct block_device *lo_device;
gfp_t old_gfp_mask;
@@ -68,7 +67,6 @@ struct loop_device {
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
- bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
@@ -139,20 +137,35 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
+static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
loff_t loopsize;
+ int ret;
+
+ if (S_ISBLK(file_inode(file)->i_mode)) {
+ loopsize = i_size_read(file->f_mapping->host);
+ } else {
+ struct kstat stat;
+
+ /*
+ * Get the accurate file size. This provides better results than
+ * cached inode data, particularly for network filesystems where
+ * metadata may be stale.
+ */
+ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+ if (ret)
+ return 0;
+
+ loopsize = stat.size;
+ }
- /* Compute loopsize in bytes */
- loopsize = i_size_read(file->f_mapping->host);
- if (offset > 0)
- loopsize -= offset;
+ if (lo->lo_offset > 0)
+ loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
-
- if (sizelimit > 0 && sizelimit < loopsize)
- loopsize = sizelimit;
+ if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+ loopsize = lo->lo_sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@@ -160,66 +173,38 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
return loopsize >> 9;
}
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
-{
- return get_size(lo->lo_offset, lo->lo_sizelimit, file);
-}
-
/*
 * We support direct I/O only if lo_offset is aligned with the logical I/O
 * size of the backing device, and the logical block size of the loop device
 * is no smaller than that of the backing device.
*/
-static bool lo_bdev_can_use_dio(struct loop_device *lo,
- struct block_device *backing_bdev)
+static bool lo_can_use_dio(struct loop_device *lo)
{
- unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
-
- if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
+ return false;
+ if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size)
return false;
- if (lo->lo_offset & (sb_bsize - 1))
+ if (lo->lo_offset & (lo->lo_min_dio_size - 1))
return false;
return true;
}
-static void __loop_update_dio(struct loop_device *lo, bool dio)
+/*
+ * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
+ * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
+ * disabled when the device block size is too small or the offset is unaligned.
+ *
+ * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
+ * not the originally passed-in one.
+ */
+static inline void loop_update_dio(struct loop_device *lo)
{
- struct file *file = lo->lo_backing_file;
- struct inode *inode = file->f_mapping->host;
- struct block_device *backing_bdev = NULL;
- bool use_dio;
+ lockdep_assert_held(&lo->lo_mutex);
+ WARN_ON_ONCE(lo->lo_state == Lo_bound &&
+ lo->lo_queue->mq_freeze_depth == 0);
- if (S_ISBLK(inode->i_mode))
- backing_bdev = I_BDEV(inode);
- else if (inode->i_sb->s_bdev)
- backing_bdev = inode->i_sb->s_bdev;
-
- use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
- (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
-
- if (lo->use_dio == use_dio)
- return;
-
- /* flush dirty pages before changing direct IO */
- vfs_fsync(file, 0);
-
- /*
- * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
- * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
- * will get updated by ioctl(LOOP_GET_STATUS)
- */
- if (lo->lo_state == Lo_bound)
- blk_mq_freeze_queue(lo->lo_queue);
- lo->use_dio = use_dio;
- if (use_dio) {
- blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
- lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- } else {
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- }
- if (lo->lo_state == Lo_bound)
- blk_mq_unfreeze_queue(lo->lo_queue);
}
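Condensed, lo_can_use_dio() requires the loop device's logical block size to cover the backing file's minimum direct-I/O unit and lo_offset to be aligned to it. A sketch of the alignment rule, assuming lo_min_dio_size is a power of two:

static bool dio_alignment_ok(unsigned int logical_block_size,
			     unsigned int min_dio_size, loff_t offset)
{
	return logical_block_size >= min_dio_size &&
	       !(offset & (min_dio_size - 1));
}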
/**
@@ -236,70 +221,26 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
+static void loop_clear_limits(struct loop_device *lo, int mode)
{
- struct iov_iter i;
- ssize_t bw;
-
- iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
+ struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
- bw = vfs_iter_write(file, &i, ppos, 0);
-
- if (likely(bw == bvec->bv_len))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Write error at byte offset %llu, length %i.\n",
- (unsigned long long)*ppos, bvec->bv_len);
- if (bw >= 0)
- bw = -EIO;
- return bw;
-}
+ if (mode & FALLOC_FL_ZERO_RANGE)
+ lim.max_write_zeroes_sectors = 0;
-static int lo_write_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- int ret = 0;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
- if (ret < 0)
- break;
- cond_resched();
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ lim.max_hw_discard_sectors = 0;
+ lim.discard_granularity = 0;
}
- return ret;
-}
-
-static int lo_read_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- struct iov_iter i;
- ssize_t len;
-
- rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0)
- return len;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- cond_resched();
- }
-
- return 0;
+ /*
+ * XXX: this updates the queue limits without freezing the queue, which
+ * is against the locking protocol and dangerous. But we can't just
+ * freeze the queue as we're inside the ->queue_rq method here. So this
+ * should move out into a workqueue unless we get the file operations to
+	 * advertise whether they support specific fallocate operations.
+ */
+ queue_limits_commit_update(lo->lo_queue, &lim);
}
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
@@ -320,6 +261,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
return -EIO;
+
+ /*
+ * We initially configure the limits in a hope that fallocate is
+ * supported and clear them here if that turns out not to be true.
+ */
+ if (unlikely(ret == -EOPNOTSUPP))
+ loop_clear_limits(lo, mode);
+
return ret;
}
@@ -337,7 +286,7 @@ static void lo_complete_rq(struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
blk_status_t ret = BLK_STS_OK;
- if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret);
@@ -353,14 +302,13 @@ static void lo_complete_rq(struct request *rq)
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else {
- if (cmd->use_aio) {
- struct bio *bio = rq->bio;
+ struct bio *bio = rq->bio;
- while (bio) {
- zero_fill_bio(bio);
- bio = bio->bi_next;
- }
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
}
+
ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
@@ -375,6 +323,8 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
+ if (req_op(rq) == REQ_OP_WRITE)
+ kiocb_end_write(&cmd->iocb);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
@@ -398,11 +348,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
struct file *file = lo->lo_backing_file;
struct bio_vec tmp;
unsigned int offset;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
@@ -440,20 +389,26 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
- cmd->iocb.ki_complete = lo_rw_aio_complete;
- cmd->iocb.ki_flags = IOCB_DIRECT;
- cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ cmd->iocb.ki_ioprio = req_get_ioprio(rq);
+ if (cmd->use_aio) {
+ cmd->iocb.ki_complete = lo_rw_aio_complete;
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ } else {
+ cmd->iocb.ki_complete = NULL;
+ cmd->iocb.ki_flags = 0;
+ }
- if (rw == ITER_SOURCE)
- ret = call_write_iter(file, &cmd->iocb, &iter);
- else
- ret = call_read_iter(file, &cmd->iocb, &iter);
+ if (rw == ITER_SOURCE) {
+ kiocb_start_write(&cmd->iocb);
+ ret = file->f_op->write_iter(&cmd->iocb, &iter);
+ } else
+ ret = file->f_op->read_iter(&cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
if (ret != -EIOCBQUEUED)
lo_rw_aio_complete(&cmd->iocb, ret);
- return 0;
+ return -EIOCBQUEUED;
}
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
@@ -461,15 +416,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- /*
- * lo_write_simple and lo_read_simple should have been covered
- * by io submit style function like lo_rw_aio(), one blocker
- * is that lo_read_simple() need to call flush_dcache_page after
- * the page is written from kernel, and it isn't easy to handle
- * this in io submit style function which submits all segments
- * of the req at one time. And direct read IO doesn't need to
- * run flush_dcache_page().
- */
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
@@ -485,27 +431,15 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
- else
- return lo_write_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
case REQ_OP_READ:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_DEST);
- else
- return lo_read_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_DEST);
default:
WARN_ON_ONCE(1);
return -EIO;
}
}
-static inline void loop_update_dio(struct loop_device *lo)
-{
- __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
- lo->use_dio);
-}
-
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
@@ -518,6 +452,28 @@ static void loop_reread_partitions(struct loop_device *lo)
__func__, lo->lo_number, lo->lo_file_name, rc);
}
+static unsigned int loop_query_min_dio_size(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * Use the minimal dio alignment of the file system if provided.
+ */
+ if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ return st.dio_offset_align;
+
+ /*
+ * In a perfect world this wouldn't be needed, but as of Linux 6.13 only
+ * a handful of file systems support the STATX_DIOALIGN flag.
+ */
+ if (sb_bdev)
+ return bdev_logical_block_size(sb_bdev);
+ return SECTOR_SIZE;
+}
+
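The userspace counterpart of this probe is statx() with STATX_DIOALIGN. A hedged sketch, using the raw syscall for portability across libc versions:

#include <fcntl.h>
#include <linux/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

static int query_dio_offset_align(int fd, unsigned int *align)
{
	struct statx stx;

	if (syscall(SYS_statx, fd, "", AT_EMPTY_PATH, STATX_DIOALIGN, &stx))
		return -1;
	if (!(stx.stx_mask & STATX_DIOALIGN))
		return -1; /* filesystem does not report dio alignment */
	*align = stx.stx_dio_offset_align;
	return 0;
}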
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
@@ -550,6 +506,28 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return 0;
}
+static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
+{
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ lo->lo_min_dio_size = loop_query_min_dio_size(lo);
+}
+
+static int loop_check_backing_file(struct file *file)
+{
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+
+ if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
 * loop_change_fd switches the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -563,6 +541,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
{
struct file *file = fget(arg);
struct file *old_file;
+ unsigned int memflags;
int error;
bool partscan;
bool is_loop;
@@ -570,6 +549,12 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
+ error = loop_check_backing_file(file);
+ if (error) {
+ fput(file);
+ return error;
+ }
+
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -595,19 +580,23 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
error = -EINVAL;
/* size of the new backing store needs to be the same */
- if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
goto out_err;
+ /*
+	 * We might switch to direct I/O mode for the loop device, so write
+	 * back all dirty data in the page cache now so that the individual
+	 * I/O operations don't have to do it.
+ */
+ vfs_fsync(file, 0);
+
/* and ... switch */
disk_force_media_change(lo->lo_disk);
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
- mapping_set_gfp_mask(file->f_mapping,
- lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
loop_global_unlock(lo, is_loop);
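The memflags plumbing reflects a blk-mq API change: blk_mq_freeze_queue() now returns a memalloc cookie that must be handed back to blk_mq_unfreeze_queue(), keeping the caller in a no-I/O allocation context while the queue is frozen. The usage pattern, as a sketch:

static void reconfigure_under_freeze(struct request_queue *q)
{
	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);
	/* ... mutate state that requires no I/O in flight ... */
	blk_mq_unfreeze_queue(q, memflags);
}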
@@ -625,19 +614,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
* dependency.
*/
fput(old_file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
if (partscan)
loop_reread_partitions(lo);
error = 0;
done:
- /* enable and uncork uevent now that we are done */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
goto done;
}
@@ -750,12 +740,11 @@ static void loop_sysfs_exit(struct loop_device *lo)
&loop_attribute_group);
}
-static void loop_config_discard(struct loop_device *lo,
- struct queue_limits *lim)
+static void loop_get_discard_config(struct loop_device *lo,
+ u32 *granularity, u32 *max_discard_sectors)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
- u32 granularity = 0, max_discard_sectors = 0;
struct kstatfs sbuf;
/*
@@ -766,27 +755,19 @@ static void loop_config_discard(struct loop_device *lo,
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
- max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
- granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
- queue_physical_block_size(backingq);
+ *max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+ *granularity = bdev_discard_granularity(bdev);
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard.
*/
} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
- max_discard_sectors = UINT_MAX >> 9;
- granularity = sbuf.f_bsize;
+ *max_discard_sectors = UINT_MAX >> 9;
+ *granularity = sbuf.f_bsize;
}
-
- lim->max_hw_discard_sectors = max_discard_sectors;
- lim->max_write_zeroes_sectors = max_discard_sectors;
- if (max_discard_sectors)
- lim->discard_granularity = granularity;
- else
- lim->discard_granularity = 0;
}
struct loop_worker {
@@ -842,7 +823,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -916,24 +897,6 @@ static void loop_free_idle_workers_timer(struct timer_list *timer)
return loop_free_idle_workers(lo, false);
}
-static void loop_update_rotational(struct loop_device *lo)
-{
- struct file *file = lo->lo_backing_file;
- struct inode *file_inode = file->f_mapping->host;
- struct block_device *file_bdev = file_inode->i_sb->s_bdev;
- struct request_queue *q = lo->lo_queue;
- bool nonrot = true;
-
- /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
- if (file_bdev)
- nonrot = bdev_nonrot(file_bdev);
-
- if (nonrot)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-}
-
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@@ -971,22 +934,49 @@ loop_set_status_from_info(struct loop_device *lo,
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_flags = info->lo_flags;
return 0;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize,
- bool update_discard_settings)
+static unsigned int loop_default_blocksize(struct loop_device *lo)
{
- struct queue_limits lim;
+ /* In case of direct I/O, match underlying minimum I/O size */
+ if (lo->lo_flags & LO_FLAGS_DIRECT_IO)
+ return lo->lo_min_dio_size;
+ return SECTOR_SIZE;
+}
- lim = queue_limits_start_update(lo->lo_queue);
- lim.logical_block_size = bsize;
- lim.physical_block_size = bsize;
- lim.io_min = bsize;
- if (update_discard_settings)
- loop_config_discard(lo, &lim);
- return queue_limits_commit_update(lo->lo_queue, &lim);
+static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
+ unsigned int bsize)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *backing_bdev = NULL;
+ u32 granularity = 0, max_discard_sectors = 0;
+
+ if (S_ISBLK(inode->i_mode))
+ backing_bdev = I_BDEV(inode);
+ else if (inode->i_sb->s_bdev)
+ backing_bdev = inode->i_sb->s_bdev;
+
+ if (!bsize)
+ bsize = loop_default_blocksize(lo);
+
+ loop_get_discard_config(lo, &granularity, &max_discard_sectors);
+
+ lim->logical_block_size = bsize;
+ lim->physical_block_size = bsize;
+ lim->io_min = bsize;
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
+ lim->features |= BLK_FEAT_WRITE_CACHE;
+ if (backing_bdev && !bdev_nonrot(backing_bdev))
+ lim->features |= BLK_FEAT_ROTATIONAL;
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
+ if (max_discard_sectors)
+ lim->discard_granularity = granularity;
+ else
+ lim->discard_granularity = 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
@@ -994,16 +984,21 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
- struct inode *inode;
- struct address_space *mapping;
+ struct queue_limits lim;
int error;
loff_t size;
bool partscan;
- unsigned short bsize;
bool is_loop;
if (!file)
return -EBADF;
+
+ error = loop_check_backing_file(file);
+ if (error) {
+ fput(file);
+ return error;
+ }
+
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
@@ -1031,23 +1026,15 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
- mapping = file->f_mapping;
- inode = mapping->host;
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
}
- if (config->block_size) {
- error = blk_validate_block_size(config->block_size);
- if (error)
- goto out_unlock;
- }
-
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
+ lo->lo_flags = config->info.lo_flags;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
@@ -1069,32 +1056,27 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
- if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_write_cache(lo->lo_queue, true, false);
-
- if (config->block_size)
- bsize = config->block_size;
- else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
- /* In case of direct I/O, match underlying block size */
- bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
- else
- bsize = 512;
-
- error = loop_reconfigure_limits(lo, bsize, true);
- if (WARN_ON_ONCE(error))
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+ /* No need to freeze the queue as the device isn't bound yet. */
+ error = queue_limits_commit_update(lo->lo_queue, &lim);
+ if (error)
goto out_unlock;
- loop_update_rotational(lo);
+ /*
+	 * We might switch to direct I/O mode for the loop device, so write
+	 * back all dirty data in the page cache now so that the individual
+	 * I/O operations don't have to do it.
+ */
+ vfs_fsync(file, 0);
+
loop_update_dio(lo);
loop_sysfs_init(lo);
- size = get_loop_size(lo, file);
+ size = lo_calculate_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1107,8 +1089,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- /* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1131,22 +1113,12 @@ out_putf:
return error;
}
-static void __loop_clr_fd(struct loop_device *lo, bool release)
+static void __loop_clr_fd(struct loop_device *lo)
{
+ struct queue_limits lim;
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
- if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
- blk_queue_write_cache(lo->lo_queue, false, false);
-
- /*
- * Freeze the request queue when unbinding on a live file descriptor and
- * thus an open device. When called from ->release we are guaranteed
- * that there is no I/O in progress already.
- */
- if (!release)
- blk_mq_freeze_queue(lo->lo_queue);
-
spin_lock_irq(&lo->lo_lock);
filp = lo->lo_backing_file;
lo->lo_backing_file = NULL;
@@ -1156,7 +1128,19 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- loop_reconfigure_limits(lo, 512, false);
+
+ /*
+ * Reset the block size to the default.
+ *
+ * No queue freezing needed because this is called from the final
+ * ->release call only, so there can't be any outstanding I/O.
+ */
+ lim = queue_limits_start_update(lo->lo_queue);
+ lim.logical_block_size = SECTOR_SIZE;
+ lim.physical_block_size = SECTOR_SIZE;
+ lim.io_min = SECTOR_SIZE;
+ queue_limits_commit_update(lo->lo_queue, &lim);
+
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
/* let user-space know about this change */
@@ -1164,8 +1148,6 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- if (!release)
- blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk);
@@ -1180,11 +1162,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
* must be at least one and it can only become zero when the
* current holder is released.
*/
- if (!release)
- mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
- if (!release)
- mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err);
@@ -1233,24 +1211,16 @@ static int loop_clr_fd(struct loop_device *lo)
return -ENXIO;
}
/*
- * If we've explicitly asked to tear down the loop device,
- * and it has an elevated reference count, set it for auto-teardown when
- * the last reference goes away. This stops $!~#$@ udev from
- * preventing teardown because it decided that it needs to run blkid on
- * the loopback device whenever they appear. xfstests is notorious for
- * failing tests because blkid via udev races with a losetup
- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
- * command to fail with EBUSY.
+	 * Mark the device for removal of the backing file on last close.
+	 * If we are the only opener, also switch the state to Lo_rundown
+	 * here to prevent new openers from coming in.
*/
- if (disk_openers(lo->lo_disk) > 1) {
- lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- loop_global_unlock(lo, true);
- return 0;
- }
- lo->lo_state = Lo_rundown;
+
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ if (disk_openers(lo->lo_disk) == 1)
+ lo->lo_state = Lo_rundown;
loop_global_unlock(lo, true);
- __loop_clr_fd(lo, false);
return 0;
}
@@ -1258,9 +1228,9 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
+ unsigned int memflags;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
@@ -1277,38 +1247,29 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
invalidate_bdev(lo->lo_device);
}
- /* I/O need to be drained during transfer transition */
- blk_mq_freeze_queue(lo->lo_queue);
-
- prev_lo_flags = lo->lo_flags;
+ /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- /* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For those flags, use the previous values instead */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For flags that can't be cleared, use previous values too */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ (info->lo_flags & LO_FLAGS_PARTSCAN);
- if (size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
- loop_set_size(lo, new_size);
- }
+ lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
- /* update dio if lo_offset or transfer is changed */
- __loop_update_dio(lo, lo->use_dio);
+ /* update the direct I/O flag if lo_offset changed */
+ loop_update_dio(lo);
out_unfreeze:
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- partscan = true;
+ if (!err && size_changed) {
+ loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
+ loop_set_size(lo, new_size);
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
@@ -1450,7 +1411,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- size = get_loop_size(lo, lo->lo_backing_file);
+ size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;
@@ -1458,40 +1419,75 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
- int error = -ENXIO;
- if (lo->lo_state != Lo_bound)
- goto out;
+ bool use_dio = !!arg;
+ unsigned int memflags;
- __loop_update_dio(lo, !!arg);
- if (lo->use_dio == !!arg)
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
return 0;
- error = -EINVAL;
- out:
- return error;
+
+ if (use_dio) {
+ if (!lo_can_use_dio(lo))
+ return -EINVAL;
+ /* flush dirty pages before starting to use direct I/O */
+ vfs_fsync(lo->lo_backing_file, 0);
+ }
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ return 0;
}
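For context, the LOOP_SET_DIRECT_IO ioctl this implements can be exercised from userspace roughly as follows (device path assumed, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);	/* assumes loop0 is bound */

	if (fd < 0)
		return 1;
	/* a non-zero arg enables direct I/O on the backing file */
	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1) < 0)
		perror("LOOP_SET_DIRECT_IO");
	close(fd);
	return 0;
}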
-static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev, unsigned long arg)
{
+ struct queue_limits lim;
+ unsigned int memflags;
int err = 0;
- if (lo->lo_state != Lo_bound)
- return -ENXIO;
+ /*
+	 * If we don't hold an exclusive handle for the device, upgrade to one
+	 * here to avoid changing the device under its exclusive owner.
+ */
+ if (!(mode & BLK_OPEN_EXCL)) {
+ err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
+ if (err)
+ return err;
+ }
- err = blk_validate_block_size(arg);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
- return err;
+ goto abort_claim;
+
+ if (lo->lo_state != Lo_bound) {
+ err = -ENXIO;
+ goto unlock;
+ }
if (lo->lo_queue->limits.logical_block_size == arg)
- return 0;
+ goto unlock;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
- blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg, false);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, arg);
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+unlock:
+ mutex_unlock(&lo->lo_mutex);
+abort_claim:
+ if (!(mode & BLK_OPEN_EXCL))
+ bd_abort_claiming(bdev, loop_set_block_size);
return err;
}
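The limits handling above is an instance of the block layer's snapshot/edit/commit pattern. A stripped-down sketch, assuming the same post-patch APIs used throughout this series (the wrapper function is hypothetical):

static int set_queue_block_size(struct request_queue *q, unsigned int bsize)
{
	struct queue_limits lim;
	unsigned int memflags;
	int err;

	lim = queue_limits_start_update(q);	/* snapshot under the limits lock */
	lim.logical_block_size = bsize;
	lim.physical_block_size = bsize;
	lim.io_min = bsize;

	/* freeze so in-flight requests never observe half-updated limits */
	memflags = blk_mq_freeze_queue(q);
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q, memflags);
	return err;
}

queue_limits_commit_update_frozen(), used in the nbd hunk further down, folds the freeze/unfreeze pair into the commit.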
@@ -1510,9 +1506,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
case LOOP_SET_DIRECT_IO:
err = loop_set_dio(lo, arg);
break;
- case LOOP_SET_BLOCK_SIZE:
- err = loop_set_block_size(lo, arg);
- break;
default:
err = -EINVAL;
}
@@ -1567,9 +1560,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
+ case LOOP_SET_BLOCK_SIZE:
+ if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return loop_set_block_size(lo, mode, bdev, arg);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
- case LOOP_SET_BLOCK_SIZE:
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
@@ -1717,25 +1713,43 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
}
#endif
+static int lo_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct loop_device *lo = disk->private_data;
+ int err;
+
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err)
+ return err;
+
+ if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown)
+ err = -ENXIO;
+ mutex_unlock(&lo->lo_mutex);
+ return err;
+}
+
static void lo_release(struct gendisk *disk)
{
struct loop_device *lo = disk->private_data;
+ bool need_clear = false;
if (disk_openers(disk) > 0)
return;
+ /*
+ * Clear the backing device information if this is the last close of
+ * a device that's been marked for auto clear, or on which LOOP_CLR_FD
+ * has been called.
+ */
mutex_lock(&lo->lo_mutex);
- if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
+ if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR))
lo->lo_state = Lo_rundown;
- mutex_unlock(&lo->lo_mutex);
- /*
- * In autoclear mode, stop the loop thread
- * and remove configuration after last close.
- */
- __loop_clr_fd(lo, true);
- return;
- }
+
+ need_clear = (lo->lo_state == Lo_rundown);
mutex_unlock(&lo->lo_mutex);
+
+ if (need_clear)
+ __loop_clr_fd(lo);
}
static void lo_free_disk(struct gendisk *disk)
@@ -1752,6 +1766,7 @@ static void lo_free_disk(struct gendisk *disk)
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
+ .open = lo_open,
.release = lo_release,
.ioctl = lo_ioctl,
#ifdef CONFIG_COMPAT
@@ -1830,6 +1845,7 @@ static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
+MODULE_DESCRIPTION("Loopback device support");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1852,7 +1868,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->use_aio = false;
break;
default:
- cmd->use_aio = lo->use_dio;
+ cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
break;
}
@@ -1885,13 +1901,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
- const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
+ /* We can block in this context, so ignore REQ_NOWAIT. */
+ if (rq->cmd_flags & REQ_NOWAIT)
+ rq->cmd_flags &= ~REQ_NOWAIT;
+
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
@@ -1915,7 +1934,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
failed:
/* complete non-aio request */
- if (!use_aio || ret) {
+ if (ret != -EIOCBQUEUED) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
@@ -2021,8 +2040,7 @@ static int loop_add(int i)
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
- BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+ lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
@@ -2037,14 +2055,6 @@ static int loop_add(int i)
lo->lo_queue = lo->lo_disk->queue;
/*
- * By default, we do buffer IO, so it doesn't make sense to enable
- * merge because the I/O submitted to backing file is handled page by
- * page. For directio mode, merge does help to dispatch bigger request
- * to underlayer disk. We will enable merge once directio is enabled.
- */
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
-
- /*
* Disable partition scanning by default. The in-kernel partition
* scanning can be requested individually per-device during its
* setup. Userspace can always add and remove partitions from all
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 43a187609ef7..567192e371a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2040,11 +2040,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* @dir Direction (read or write)
*
* return value
- * None
+ * 0 The IO completed successfully.
+ * -ENOMEM The DMA mapping failed.
*/
-static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
- struct mtip_cmd *command,
- struct blk_mq_hw_ctx *hctx)
+static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
+ struct mtip_cmd *command,
+ struct blk_mq_hw_ctx *hctx)
{
struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
@@ -2056,12 +2057,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
unsigned int nents;
/* Map the scatter list for DMA access */
- nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
- nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
+ command->scatter_ents = blk_rq_map_sg(rq, command->sg);
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ command->scatter_ents, dma_dir);
+ if (!nents)
+ return -ENOMEM;
- prefetch(&port->flags);
- command->scatter_ents = nents;
+ prefetch(&port->flags);
/*
* The number of retries for this command before it is
@@ -2112,11 +2115,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
- return;
+ return 0;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, rq->tag);
+
+ return 0;
}
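The new error path hinges on dma_map_sg() returning 0 when the mapping fails outright. A hedged sketch of that contract (helper name hypothetical):

static int map_for_dma(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sg, nents, dir);

	if (!mapped)		/* 0 means the mapping failed entirely */
		return -ENOMEM;
	return mapped;		/* may be < nents after IOMMU coalescing */
}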
/*
@@ -2259,35 +2264,20 @@ static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_registers,
- .llseek = no_llseek,
};
static const struct file_operations mtip_flags_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_flags,
- .llseek = no_llseek,
};
-static int mtip_hw_debugfs_init(struct driver_data *dd)
+static void mtip_hw_debugfs_init(struct driver_data *dd)
{
- if (!dfs_parent)
- return -1;
-
dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
- if (IS_ERR_OR_NULL(dd->dfs_node)) {
- dev_warn(&dd->pdev->dev,
- "Error creating node %s under debugfs\n",
- dd->disk->disk_name);
- dd->dfs_node = NULL;
- return -1;
- }
-
debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
debugfs_create_file("registers", 0444, dd->dfs_node, dd,
&mtip_regs_fops);
-
- return 0;
}
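The simplification works because modern debugfs helpers tolerate failed parents: a NULL or ERR_PTR dentry makes the child creation calls no-ops, so callers no longer need to check. A minimal sketch of the idiom (names hypothetical):

static void my_debugfs_init(struct dentry **node, const char *name,
			    void *data, const struct file_operations *fops)
{
	/* no error check: a NULL/ERR_PTR parent makes the next call a no-op */
	*node = debugfs_create_dir(name, NULL);
	debugfs_create_file("state", 0444, *node, data, fops);
}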
static void mtip_hw_debugfs_exit(struct driver_data *dd)
@@ -2716,7 +2706,12 @@ static int mtip_hw_init(struct driver_data *dd)
int rv;
unsigned long timeout, timetaken;
- dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+ dd->mmio = pcim_iomap_region(dd->pdev, MTIP_ABAR, MTIP_DRV_NAME);
+ if (IS_ERR(dd->mmio)) {
+ dev_err(&dd->pdev->dev, "Unable to request / ioremap PCI region\n");
+ return PTR_ERR(dd->mmio);
+ }
+
mtip_detect_product(dd);
if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
@@ -3153,17 +3148,17 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
* that each partition is also 4KB aligned. Non-aligned partitions adversely
* affects performance.
*
- * @dev Pointer to the block_device strucutre.
+ * @disk Pointer to the gendisk structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
-static int mtip_block_getgeo(struct block_device *dev,
+static int mtip_block_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct driver_data *dd = dev->bd_disk->private_data;
+ struct driver_data *dd = disk->private_data;
sector_t capacity;
if (!dd)
@@ -3325,7 +3320,9 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- mtip_hw_submit_io(dd, rq, cmd, hctx);
+ if (mtip_hw_submit_io(dd, rq, cmd, hctx))
+ return BLK_STS_IOERR;
+
return BLK_STS_OK;
}
@@ -3426,7 +3423,6 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
@@ -3485,8 +3481,6 @@ skip_create_disk:
goto start_service_thread;
/* Set device limits. */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
/* Set the capacity of the device in 512 byte sectors. */
@@ -3727,17 +3721,10 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- /* Map BAR5 to memory. */
- rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
- if (rv < 0) {
- dev_err(&pdev->dev, "Unable to map regions\n");
- goto iomap_err;
- }
-
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
- goto setmask_err;
+ goto iomap_err;
}
/* Copy the info we may need later into the private data structure. */
@@ -3753,7 +3740,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
- goto setmask_err;
+ goto iomap_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
@@ -3850,8 +3837,6 @@ msi_initialize_err:
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
-setmask_err:
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
iomap_err:
kfree(dd);
@@ -3927,7 +3912,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
@@ -4045,10 +4029,6 @@ static int __init mtip_init(void)
mtip_major = error;
dfs_parent = debugfs_create_dir("rssd", NULL);
- if (IS_ERR_OR_NULL(dfs_parent)) {
- pr_warn("Error creating debugfs parent\n");
- dfs_parent = NULL;
- }
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 27b2187e7a6d..b9fdeff31caf 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -150,8 +150,6 @@ static int __init n64cart_probe(struct platform_device *pdev)
set_capacity(disk, size >> SECTOR_SHIFT);
set_disk_ro(disk, 1);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9d4ec9273bf9..f6c33b21f69e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,6 +62,7 @@ struct nbd_sock {
bool dead;
int fallback_index;
int cookie;
+ struct work_struct work;
};
struct recv_thread_args {
@@ -141,6 +142,9 @@ struct nbd_device {
*/
#define NBD_CMD_INFLIGHT 2
+/* Only part of the request header or data payload was sent successfully */
+#define NBD_CMD_PARTIAL_SEND 3
+
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
@@ -181,6 +185,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
+ lockdep_assert_held(&cmd->lock);
+
+ /*
+	 * Clear the INFLIGHT flag so that this cmd won't be completed via
+	 * the normal completion path.
+	 *
+	 * The flag will be set again the next time the cmd is queued to
+	 * nbd.
+ */
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+
if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
blk_mq_requeue_request(req, true);
}
@@ -222,7 +237,7 @@ static ssize_t pid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
- struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
+ struct nbd_device *nbd = disk->private_data;
return sprintf(buf, "%d\n", nbd->pid);
}
@@ -236,7 +251,7 @@ static ssize_t backend_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
- struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
+ struct nbd_device *nbd = disk->private_data;
return sprintf(buf, "%s\n", nbd->backend ?: "");
}
@@ -296,7 +311,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
- queue_work(system_wq, &args->work);
+ queue_work(system_percpu_wq, &args->work);
}
}
if (!nsock->dead) {
@@ -316,8 +331,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
{
struct queue_limits lim;
int error;
@@ -339,12 +353,25 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim = queue_limits_start_update(nbd->disk->queue);
if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
- lim.max_hw_discard_sectors = UINT_MAX;
+ lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT;
else
lim.max_hw_discard_sectors = 0;
+ if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
+ lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
+ lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
+ } else {
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim.features &= ~BLK_FEAT_FUA;
+ }
+ if (nbd->config->flags & NBD_FLAG_ROTATIONAL)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+ if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES)
+ lim.max_write_zeroes_sectors = UINT_MAX >> SECTOR_SHIFT;
+
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
- error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
if (error)
return error;
@@ -355,18 +382,6 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
-{
- int error;
-
- blk_mq_freeze_queue(nbd->disk->queue);
- error = __nbd_set_size(nbd, bytesize, blksize);
- blk_mq_unfreeze_queue(nbd->disk->queue);
-
- return error;
-}
-
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -410,6 +425,8 @@ static u32 req_to_nbd_cmd_type(struct request *req)
return NBD_CMD_WRITE;
case REQ_OP_READ:
return NBD_CMD_READ;
+ case REQ_OP_WRITE_ZEROES:
+ return NBD_CMD_WRITE_ZEROES;
default:
return U32_MAX;
}
@@ -440,6 +457,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ /* partial send is handled in nbd_sock's work function */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_RESET_TIMER;
+ }
+
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
@@ -480,8 +503,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
- mutex_unlock(&cmd->lock);
nbd_requeue_cmd(cmd);
+ mutex_unlock(&cmd->lock);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
@@ -542,24 +565,27 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
- do {
- sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
- sock->sk->sk_use_task_frag = false;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
- if (send)
- result = sock_sendmsg(sock, &msg);
- else
- result = sock_recvmsg(sock, &msg, msg.msg_flags);
-
- if (result <= 0) {
- if (result == 0)
- result = -EPIPE; /* short read */
- break;
- }
- if (sent)
- *sent += result;
- } while (msg_data_left(&msg));
+
+ scoped_with_kernel_creds() {
+ do {
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+ sock->sk->sk_use_task_frag = false;
+ msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+ if (send)
+ result = sock_sendmsg(sock, &msg);
+ else
+ result = sock_recvmsg(sock, &msg, msg.msg_flags);
+
+ if (result <= 0) {
+ if (result == 0)
+ result = -EPIPE; /* short read */
+ break;
+ }
+ if (sent)
+ *sent += result;
+ } while (msg_data_left(&msg));
+ }
memalloc_noreclaim_restore(noreclaim_flag);
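The memalloc_noreclaim bracket around the transfer loop matters because the socket calls can allocate while the nbd device may itself be servicing memory-reclaim writeback; entering direct reclaim here could deadlock. The bare idiom, as a hedged sketch (helper name hypothetical):

static int xmit_noreclaim(struct socket *sock, struct msghdr *msg)
{
	unsigned int noreclaim_flag;
	int ret;

	/* forbid direct reclaim: we may be writing out dirty pages */
	noreclaim_flag = memalloc_noreclaim_save();
	ret = sock_sendmsg(sock, msg);
	memalloc_noreclaim_restore(noreclaim_flag);
	return ret;
}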
@@ -588,8 +614,36 @@ static inline int was_interrupted(int result)
return result == -ERESTARTSYS || result == -EINTR;
}
-/* always call with the tx_lock held */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+/*
+ * We've already sent the header or part of the data payload, so we have
+ * no choice but to mark the request pending and finish the send from a
+ * work item.
+ *
+ * We also have to return BLK_STS_OK to the block core; otherwise the same
+ * request may be re-dispatched with a different tag, while our header
+ * already went out with the old tag, which would confuse reply handling.
+ */
+static void nbd_sched_pending_work(struct nbd_device *nbd,
+ struct nbd_sock *nsock,
+ struct nbd_cmd *cmd, int sent)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ /* pending work should be scheduled only once */
+ WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
+
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+ refcount_inc(&nbd->config_refs);
+ schedule_work(&nsock->work);
+}
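Pulling the pieces together, the assumed lifecycle of a partially sent command (summarizing the hunks above and below, not authoritative) is:

/*
 *   nbd_send_cmd() interrupted mid-header or mid-payload
 *     -> nbd_sched_pending_work(): set NBD_CMD_PARTIAL_SEND, record
 *        nsock->pending and nsock->sent, take a config ref, return
 *     -> nbd_pending_cmd_work(): resend until nsock->pending clears or
 *        the request deadline would be overshot
 *     -> clear NBD_CMD_PARTIAL_SEND, drop the config ref
 */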
+
+/*
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
+ */
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+ int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_config *config = nbd->config;
@@ -598,28 +652,32 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
struct iov_iter from;
- unsigned long size = blk_rq_bytes(req);
struct bio *bio;
u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
int sent = nsock->sent, skip = 0;
+ lockdep_assert_held(&cmd->lock);
+ lockdep_assert_held(&nsock->tx_lock);
+
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
type = req_to_nbd_cmd_type(req);
if (type == U32_MAX)
- return -EIO;
+ return BLK_STS_IOERR;
if (rq_data_dir(req) == WRITE &&
(config->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
- return -EIO;
+ return BLK_STS_IOERR;
}
if (req->cmd_flags & REQ_FUA)
nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+ if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES))
+ nbd_cmd_flags |= NBD_CMD_FLAG_NO_HOLE;
/* We did a partial send previously, and we at least sent the whole
* request struct, so just go and send the rest of the pages in the
@@ -644,7 +702,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.type = htonl(type | nbd_cmd_flags);
if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
- request.len = htonl(size);
+ request.len = htonl(blk_rq_bytes(req));
}
handle = nbd_cmd_handle(cmd);
request.cookie = cpu_to_be64(handle);
@@ -665,15 +723,15 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
* completely done.
*/
if (sent) {
- nsock->pending = req;
- nsock->sent = sent;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
- return -EAGAIN;
+ goto requeue;
}
send_pages:
if (type != NBD_CMD_WRITE)
@@ -703,19 +761,13 @@ send_pages:
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
- /* We've already sent the header, we
- * have no choice but to set pending and
- * return BUSY.
- */
- nsock->pending = req;
- nsock->sent = sent;
- set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
- return -EAGAIN;
+ goto requeue;
}
/*
* The completion might already have come in,
@@ -732,7 +784,62 @@ out:
trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
- return 0;
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ return BLK_STS_OK;
+
+requeue:
+ /*
+	 * We can't requeue if this is a partial send; the retry has to run
+	 * from the pending work function instead.
+	 */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
+ return BLK_STS_OK;
+
+ /* retry on a different socket */
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ nbd_requeue_cmd(cmd);
+ return BLK_STS_OK;
+}
+
+/* handle a partial send */
+static void nbd_pending_cmd_work(struct work_struct *work)
+{
+ struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
+ struct request *req = nsock->pending;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ unsigned long deadline = READ_ONCE(req->deadline);
+ unsigned int wait_ms = 2;
+
+ mutex_lock(&cmd->lock);
+
+ WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
+ if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
+ goto out;
+
+ mutex_lock(&nsock->tx_lock);
+ while (true) {
+ nbd_send_cmd(nbd, cmd, cmd->index);
+ if (!nsock->pending)
+ break;
+
+		/* don't bother the timeout handler with partial sends */
+ if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
+ cmd->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+ break;
+ }
+ msleep(wait_ms);
+ wait_ms *= 2;
+ }
+ mutex_unlock(&nsock->tx_lock);
+ clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+out:
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
}
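The wait loop above is a deadline-bounded exponential backoff. The bare pattern, as a sketch (all names hypothetical):

static bool retry_until_deadline(bool (*try_once)(void *), void *arg,
				 unsigned long deadline)
{
	unsigned int wait_ms = 2;

	while (!try_once(arg)) {
		/* give up rather than overshoot the request deadline */
		if (jiffies + msecs_to_jiffies(wait_ms) >= deadline)
			return false;
		msleep(wait_ms);
		wait_ms *= 2;		/* 2, 4, 8, ... ms */
	}
	return true;
}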
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -914,9 +1021,9 @@ static void recv_work(struct work_struct *work)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
- nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
+ nbd_config_put(nbd);
kfree(args);
}
@@ -1007,26 +1114,28 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
struct nbd_sock *nsock;
- int ret;
+ blk_status_t ret;
+
+ lockdep_assert_held(&cmd->lock);
config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
- return -EINVAL;
+ return BLK_STS_IOERR;
}
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
- return -EINVAL;
+ return BLK_STS_IOERR;
}
cmd->status = BLK_STS_OK;
again:
@@ -1049,7 +1158,7 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
- return -EIO;
+ return BLK_STS_IOERR;
}
goto again;
}
@@ -1062,27 +1171,10 @@ again:
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
nbd_requeue_cmd(cmd);
- ret = 0;
+ ret = BLK_STS_OK;
goto out;
}
- /*
- * Some failures are related to the link going down, so anything that
- * returns EAGAIN can be retried on a different socket.
- */
ret = nbd_send_cmd(nbd, cmd, index);
- /*
- * Access to this flag is protected by cmd->lock, thus it's safe to set
- * the flag after nbd_send_cmd() succeed to send request to server.
- */
- if (!ret)
- __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
- else if (ret == -EAGAIN) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed, requeueing\n");
- nbd_mark_nsock_dead(nbd, nsock, 1);
- nbd_requeue_cmd(cmd);
- ret = 0;
- }
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
@@ -1093,7 +1185,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- int ret;
+ blk_status_t ret;
/*
* Since we look at the bio's to send the request over the network we
@@ -1113,10 +1205,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
- if (ret < 0)
- ret = BLK_STS_IOERR;
- else if (!ret)
- ret = BLK_STS_OK;
mutex_unlock(&cmd->lock);
return ret;
@@ -1132,6 +1220,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
@@ -1149,6 +1245,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct socket *sock;
struct nbd_sock **socks;
struct nbd_sock *nsock;
+ unsigned int memflags;
int err;
/* Arg will be cast to int, check it to avoid overflow */
@@ -1162,7 +1259,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
- blk_mq_freeze_queue(nbd->disk->queue);
+ memflags = blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1200,14 +1297,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
+ INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
return 0;
put_socket:
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
sockfd_put(sock);
return err;
}
@@ -1283,19 +1381,10 @@ static void nbd_bdev_reset(struct nbd_device *nbd)
static void nbd_parse_flags(struct nbd_device *nbd)
{
- struct nbd_config *config = nbd->config;
- if (config->flags & NBD_FLAG_READ_ONLY)
+ if (nbd->config->flags & NBD_FLAG_READ_ONLY)
set_disk_ro(nbd->disk, true);
else
set_disk_ro(nbd->disk, false);
- if (config->flags & NBD_FLAG_SEND_FLUSH) {
- if (config->flags & NBD_FLAG_SEND_FUA)
- blk_queue_write_cache(nbd->disk->queue, true, true);
- else
- blk_queue_write_cache(nbd->disk->queue, true, false);
- }
- else
- blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
@@ -1395,7 +1484,17 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
- blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
+retry:
+ mutex_unlock(&nbd->config_lock);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections);
+ mutex_lock(&nbd->config_lock);
+
+	/* if another code path updated nr_hw_queues, retry until it succeeds */
+ if (num_connections != config->num_connections) {
+ num_connections = config->num_connections;
+ goto retry;
+ }
+
nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
@@ -1708,6 +1807,10 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
seq_puts(s, "NBD_FLAG_SEND_FUA\n");
if (flags & NBD_FLAG_SEND_TRIM)
seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
+ if (flags & NBD_FLAG_SEND_WRITE_ZEROES)
+ seq_puts(s, "NBD_FLAG_SEND_WRITE_ZEROES\n");
+ if (flags & NBD_FLAG_ROTATIONAL)
+ seq_puts(s, "NBD_FLAG_ROTATIONAL\n");
return 0;
}
@@ -1805,7 +1908,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
struct queue_limits lim = {
.max_hw_sectors = 65536,
- .max_user_sectors = 256,
+ .io_opt = 256 << SECTOR_SHIFT,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
};
@@ -1822,8 +1925,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
+ nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
@@ -1865,11 +1967,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
goto out_err_disk;
}
- /*
- * Tell the block layer that we are not a rotational device
- */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
/*
@@ -2122,9 +2219,7 @@ again:
goto out;
}
}
- ret = nbd_start_device(nbd);
- if (ret)
- goto out;
+
if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
GFP_KERNEL);
@@ -2140,13 +2235,16 @@ again:
goto out;
}
set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
+
+ ret = nbd_start_device(nbd);
out:
- mutex_unlock(&nbd->config_lock);
if (!ret) {
set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index);
}
+ mutex_unlock(&nbd->config_lock);
+
nbd_config_put(nbd);
if (put_dev)
nbd_put(nbd);
@@ -2166,6 +2264,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
+ clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 4005a8b685e8..c7c0fb79a6bf 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -77,7 +77,7 @@ enum {
NULL_IRQ_TIMER = 2,
};
-static bool g_virt_boundary = false;
+static bool g_virt_boundary;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
@@ -223,11 +223,11 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
-MODULE_PARM_DESC(zoned, "Enable/disable FUA support when cache_size is used. Default: true");
+MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
@@ -262,6 +262,14 @@ module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444
MODULE_PARM_DESC(zone_append_max_sectors,
"Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");
+static bool g_zone_full;
+module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
+MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+
+static bool g_rotational;
+module_param_named(rotational, g_rotational, bool, S_IRUGO);
+MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -413,13 +421,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
static int nullb_apply_submit_queues(struct nullb_device *dev,
unsigned int submit_queues)
{
- return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+ int ret;
+
+ mutex_lock(&lock);
+ ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+ mutex_unlock(&lock);
+
+ return ret;
}
static int nullb_apply_poll_queues(struct nullb_device *dev,
unsigned int poll_queues)
{
- return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
+ int ret;
+
+ mutex_lock(&lock);
+ ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
+ mutex_unlock(&lock);
+
+ return ret;
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
@@ -446,11 +466,15 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
+NULLB_DEVICE_ATTR(zone_full, bool, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
+NULLB_DEVICE_ATTR(rotational, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_once, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_partial_io, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -468,28 +492,32 @@ static ssize_t nullb_device_power_store(struct config_item *item,
if (ret < 0)
return ret;
+ ret = count;
+ mutex_lock(&lock);
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
- return count;
+ goto out;
+
ret = null_add_dev(dev);
if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
- return ret;
+ goto out;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
+ ret = count;
} else if (dev->power && !newp) {
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
- mutex_lock(&lock);
dev->power = newp;
null_del_dev(dev->nullb);
- mutex_unlock(&lock);
}
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
- return count;
+out:
+ mutex_unlock(&lock);
+ return ret;
}
CONFIGFS_ATTR(nullb_device_, power);
@@ -533,14 +561,14 @@ static ssize_t nullb_device_badblocks_store(struct config_item *item,
goto out;
/* enable badblocks */
cmpxchg(&t_dev->badblocks.shift, -1, 0);
- if (buf[0] == '+')
- ret = badblocks_set(&t_dev->badblocks, start,
- end - start + 1, 1);
- else
- ret = badblocks_clear(&t_dev->badblocks, start,
- end - start + 1);
- if (ret == 0)
+ if (buf[0] == '+') {
+ if (badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1))
+ ret = count;
+ } else if (badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1)) {
ret = count;
+ }
out:
kfree(orig);
return ret;
@@ -566,39 +594,43 @@ static ssize_t nullb_device_zone_offline_store(struct config_item *item,
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
static struct configfs_attribute *nullb_device_attrs[] = {
- &nullb_device_attr_size,
+ &nullb_device_attr_badblocks,
+ &nullb_device_attr_badblocks_once,
+ &nullb_device_attr_badblocks_partial_io,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_cache_size,
&nullb_device_attr_completion_nsec,
- &nullb_device_attr_submit_queues,
- &nullb_device_attr_poll_queues,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_fua,
&nullb_device_attr_home_node,
- &nullb_device_attr_queue_mode,
- &nullb_device_attr_blocksize,
- &nullb_device_attr_max_sectors,
- &nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_blocking,
- &nullb_device_attr_use_per_node_hctx,
- &nullb_device_attr_power,
- &nullb_device_attr_memory_backed,
- &nullb_device_attr_discard,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_max_sectors,
&nullb_device_attr_mbps,
- &nullb_device_attr_cache_size,
- &nullb_device_attr_badblocks,
- &nullb_device_attr_zoned,
- &nullb_device_attr_zone_size,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_poll_queues,
+ &nullb_device_attr_power,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_rotational,
+ &nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_shared_tags,
+ &nullb_device_attr_size,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_virt_boundary,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_capacity,
- &nullb_device_attr_zone_nr_conv,
- &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_full,
&nullb_device_attr_zone_max_active,
- &nullb_device_attr_zone_append_max_sectors,
- &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_offline,
- &nullb_device_attr_virt_boundary,
- &nullb_device_attr_no_sched,
- &nullb_device_attr_shared_tags,
- &nullb_device_attr_shared_tag_bitmap,
- &nullb_device_attr_fua,
+ &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_size,
+ &nullb_device_attr_zoned,
NULL,
};
@@ -676,15 +708,28 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,fua,"
- "completion_nsec,discard,home_node,hw_queue_depth,"
- "irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,"
- "shared_tags,size,submit_queues,use_per_node_hctx,"
- "virt_boundary,zoned,zone_capacity,zone_max_active,"
- "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors\n");
+ struct configfs_attribute **entry;
+ char delimiter = ',';
+ size_t left = PAGE_SIZE;
+ size_t written = 0;
+ int ret;
+
+ for (entry = &nullb_device_attrs[0]; *entry && left > 0; entry++) {
+ if (!*(entry + 1))
+ delimiter = '\n';
+ ret = snprintf(page + written, left, "%s%c", (*entry)->ca_name,
+ delimiter);
+ if (ret >= left) {
+ WARN_ONCE(1, "Too many null_blk features to print\n");
+ memzero_explicit(page, PAGE_SIZE);
+ return -ENOBUFS;
+ }
+ left -= ret;
+ written += ret;
+ }
+
+ return written;
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -765,11 +810,13 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->zone_append_max_sectors = g_zone_append_max_sectors;
+ dev->zone_full = g_zone_full;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
dev->fua = g_fua;
+ dev->rotational = g_rotational;
return dev;
}
@@ -876,7 +923,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
- WARN_ON(!t_page || t_page->page->index != idx);
+ WARN_ON(!t_page || t_page->page->private != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
@@ -899,7 +946,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
- pos = t_pages[i]->page->index;
+ pos = t_pages[i]->page->private;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
@@ -925,7 +972,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
- WARN_ON(t_page && t_page->page->index != idx);
+ WARN_ON(t_page && t_page->page->private != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
@@ -968,7 +1015,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
- t_page->page->index = idx;
+ t_page->page->private = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
@@ -988,7 +1035,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
struct nullb_page *t_page, *ret;
void *dst, *src;
- idx = c_page->page->index;
+ idx = c_page->page->private;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
@@ -1047,7 +1094,7 @@ again:
* avoid race, we don't allow page free
*/
for (i = 0; i < nr_pages; i++) {
- nullb->cache_flush_pos = c_pages[i]->page->index;
+ nullb->cache_flush_pos = c_pages[i]->page->private;
/*
* We found the page which is being flushed to disk by other
* threads
@@ -1082,26 +1129,28 @@ again:
return 0;
}
-static int copy_to_nullb(struct nullb *nullb, struct page *source,
- unsigned int off, sector_t sector, size_t n, bool is_fua)
+static blk_status_t copy_to_nullb(struct nullb *nullb, void *source,
+ loff_t pos, size_t n, bool is_fua)
{
size_t temp, count = 0;
- unsigned int offset;
struct nullb_page *t_page;
+ sector_t sector;
while (count < n) {
- temp = min_t(size_t, nullb->dev->blocksize, n - count);
+ temp = min3(nullb->dev->blocksize, n - count,
+ PAGE_SIZE - offset_in_page(pos));
+ sector = pos >> SECTOR_SHIFT;
if (null_cache_active(nullb) && !is_fua)
null_make_cache_space(nullb, PAGE_SIZE);
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_insert_page(nullb, sector,
!null_cache_active(nullb) || is_fua);
if (!t_page)
- return -ENOSPC;
+ return BLK_STS_NOSPC;
- memcpy_page(t_page->page, offset, source, off + count, temp);
+ memcpy_to_page(t_page->page, offset_in_page(pos),
+ source + count, temp);
__set_bit(sector & SECTOR_MASK, t_page->bitmap);
@@ -1109,41 +1158,34 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
null_free_sector(nullb, sector, true);
count += temp;
- sector += temp >> SECTOR_SHIFT;
+ pos += temp;
}
- return 0;
+ return BLK_STS_OK;
}
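A worked example of the min3() bound:

/*
 * Assume blocksize = 512 and PAGE_SIZE = 4096, copying n = 1024 bytes
 * starting at pos = 4090:
 *
 *   pass 1: temp = min3(512, 1024,    6) =   6   (stop at the page end)
 *   pass 2: temp = min3(512, 1018, 4096) = 512
 *   pass 3: temp = min3(512,  506, 3584) = 506
 *
 * so no single memcpy_to_page() crosses a backing-page boundary.
 */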
-static int copy_from_nullb(struct nullb *nullb, struct page *dest,
- unsigned int off, sector_t sector, size_t n)
+static void copy_from_nullb(struct nullb *nullb, void *dest, loff_t pos,
+ size_t n)
{
size_t temp, count = 0;
- unsigned int offset;
struct nullb_page *t_page;
+ sector_t sector;
while (count < n) {
- temp = min_t(size_t, nullb->dev->blocksize, n - count);
+ temp = min3(nullb->dev->blocksize, n - count,
+ PAGE_SIZE - offset_in_page(pos));
+ sector = pos >> SECTOR_SHIFT;
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_lookup_page(nullb, sector, false,
!null_cache_active(nullb));
-
if (t_page)
- memcpy_page(dest, off + count, t_page->page, offset,
- temp);
+ memcpy_from_page(dest + count, t_page->page,
+ offset_in_page(pos), temp);
else
- zero_user(dest, off + count, temp);
+ memset(dest + count, 0, temp);
count += temp;
- sector += temp >> SECTOR_SHIFT;
+ pos += temp;
}
- return 0;
-}
-
-static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
- unsigned int len, unsigned int off)
-{
- memset_page(page, off, 0xff, len);
}
blk_status_t null_handle_discard(struct nullb_device *dev,
@@ -1187,60 +1229,77 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
return errno_to_blk_status(err);
}
-static int null_transfer(struct nullb *nullb, struct page *page,
- unsigned int len, unsigned int off, bool is_write, sector_t sector,
+static blk_status_t null_transfer(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off, bool is_write, loff_t pos,
bool is_fua)
{
struct nullb_device *dev = nullb->dev;
+ blk_status_t err = BLK_STS_OK;
unsigned int valid_len = len;
- int err = 0;
+ void *p;
+ p = kmap_local_page(page) + off;
if (!is_write) {
- if (dev->zoned)
+ if (dev->zoned) {
valid_len = null_zone_valid_read_len(nullb,
- sector, len);
+ pos >> SECTOR_SHIFT, len);
+ if (valid_len && valid_len != len)
+ valid_len -= pos & (SECTOR_SIZE - 1);
+ }
if (valid_len) {
- err = copy_from_nullb(nullb, page, off,
- sector, valid_len);
+ copy_from_nullb(nullb, p, pos, valid_len);
off += valid_len;
len -= valid_len;
}
if (len)
- nullb_fill_pattern(nullb, page, len, off);
+ memset(p + valid_len, 0xff, len);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
- err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
+ err = copy_to_nullb(nullb, p, pos, len, is_fua);
}
+ kunmap_local(p);
return err;
}
-static int null_handle_rq(struct nullb_cmd *cmd)
+/*
+ * Transfer data for the given request. The transfer size is capped by the
+ * nr_sectors argument.
+ */
+static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
+ sector_t nr_sectors)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
- int err = 0;
+ blk_status_t err = BLK_STS_OK;
unsigned int len;
- sector_t sector = blk_rq_pos(rq);
+ loff_t pos = blk_rq_pos(rq) << SECTOR_SHIFT;
+ unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
+ unsigned int transferred_bytes = 0;
struct req_iterator iter;
struct bio_vec bvec;
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
+ if (transferred_bytes + len > max_bytes)
+ len = max_bytes - transferred_bytes;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(req_op(rq)), sector,
+ op_is_write(req_op(rq)), pos,
rq->cmd_flags & REQ_FUA);
if (err)
break;
- sector += len >> SECTOR_SHIFT;
+ pos += len;
+ transferred_bytes += len;
+ if (transferred_bytes >= max_bytes)
+ break;
}
spin_unlock_irq(&nullb->lock);
- return errno_to_blk_status(err);
+ return err;
}
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
@@ -1264,31 +1323,51 @@ static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
return sts;
}
-static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
- sector_t sector,
- sector_t nr_sectors)
+/*
+ * Check if the command should fail because it covers bad blocks. If so,
+ * return BLK_STS_IOERR and reduce *nr_sectors to the partial I/O size,
+ * which may be less than the number of sectors requested.
+ *
+ * @cmd: The command to handle.
+ * @sector: The start sector of the I/O.
+ * @nr_sectors: Specifies the number of sectors to write or read, and returns
+ *              the number of sectors actually to be written or read.
+ */
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors)
{
struct badblocks *bb = &cmd->nq->dev->badblocks;
- sector_t first_bad;
- int bad_sectors;
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int block_sectors = dev->blocksize >> SECTOR_SHIFT;
+ sector_t first_bad, bad_sectors;
+ unsigned int partial_io_sectors = 0;
- if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
- return BLK_STS_IOERR;
+ if (!badblocks_check(bb, sector, *nr_sectors, &first_bad, &bad_sectors))
+ return BLK_STS_OK;
- return BLK_STS_OK;
+ if (cmd->nq->dev->badblocks_once)
+ badblocks_clear(bb, first_bad, bad_sectors);
+
+ if (cmd->nq->dev->badblocks_partial_io) {
+ if (!IS_ALIGNED(first_bad, block_sectors))
+ first_bad = ALIGN_DOWN(first_bad, block_sectors);
+ if (sector < first_bad)
+ partial_io_sectors = first_bad - sector;
+ }
+ *nr_sectors = partial_io_sectors;
+
+ return BLK_STS_IOERR;
}
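To make the alignment math concrete:

/*
 * Worked example: assume blocksize = 4096 (block_sectors = 8) and
 * badblocks_partial_io enabled. A 16-sector I/O at sector 8 whose
 * first bad sector is 19 yields
 *
 *   first_bad          = ALIGN_DOWN(19, 8)  = 16
 *   partial_io_sectors = first_bad - sector =  8
 *
 * so 8 sectors still transfer before the command fails with
 * BLK_STS_IOERR.
 */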
-static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_op op,
- sector_t sector,
- sector_t nr_sectors)
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
- return null_handle_rq(cmd);
+ return null_handle_data_transfer(cmd, nr_sectors);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
@@ -1335,18 +1414,19 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
- if (dev->badblocks.shift != -1) {
- ret = null_handle_badblocks(cmd, sector, nr_sectors);
+ if (dev->badblocks.shift != -1)
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+
+ if (dev->memory_backed && nr_sectors) {
+ ret = null_handle_memory_backed(cmd, op, sector, nr_sectors);
if (ret != BLK_STS_OK)
return ret;
}
- if (dev->memory_backed)
- return null_handle_memory_backed(cmd, op, sector, nr_sectors);
-
- return BLK_STS_OK;
+ return badblocks_ret;
}
static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
@@ -1395,8 +1475,7 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
- hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- nullb->bw_timer.function = nullb_bwtimer_fn;
+ hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
@@ -1518,8 +1597,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
- blk_mq_end_request_batch))
+ if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+ blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1573,8 +1652,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
+ hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
cmd->error = BLK_STS_OK;
cmd->nq = nq;
@@ -1615,10 +1694,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
{
- struct request *requeue_list = NULL;
- struct request **requeue_lastp = &requeue_list;
+ struct rq_list requeue_list = {};
struct blk_mq_queue_data bd = { };
blk_status_t ret;
@@ -1628,8 +1706,8 @@ static void null_queue_rqs(struct request **rqlist)
bd.rq = rq;
ret = null_queue_rq(rq->mq_hctx, &bd);
if (ret != BLK_STS_OK)
- rq_list_add_tail(&requeue_lastp, rq);
- } while (!rq_list_empty(*rqlist));
+ rq_list_add_tail(&requeue_list, rq);
+ } while (!rq_list_empty(rqlist));
*rqlist = requeue_list;
}
@@ -1761,9 +1839,8 @@ static int null_init_global_tag_set(void)
tag_set.nr_hw_queues = g_submit_queues;
tag_set.queue_depth = g_hw_queue_depth;
tag_set.numa_node = g_home_node;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
- tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (g_shared_tag_bitmap)
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
@@ -1787,9 +1864,8 @@ static int null_setup_tagset(struct nullb *nullb)
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
nullb->tag_set->numa_node = nullb->dev->home_node;
- nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
if (nullb->dev->no_sched)
- nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (nullb->dev->shared_tag_bitmap)
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
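
Two flag changes are folded into these hunks: BLK_MQ_F_SHOULD_MERGE disappears because merging is now unconditional, and BLK_MQ_F_NO_SCHED is replaced by BLK_MQ_F_NO_SCHED_BY_DEFAULT, which only defaults the elevator to "none" rather than forbidding schedulers outright. A minimal tag-set setup after this change might look like the following sketch (field values are illustrative, not the driver's exact configuration):

	struct blk_mq_tag_set set = {
		.ops		= &null_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.numa_node	= NUMA_NO_NODE,
		/* no BLK_MQ_F_SHOULD_MERGE: merging is implied now */
		.flags		= BLK_MQ_F_NO_SCHED_BY_DEFAULT,
	};
	int ret = blk_mq_alloc_tag_set(&set);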
@@ -1808,9 +1884,6 @@ static int null_validate_conf(struct nullb_device *dev)
dev->queue_mode = NULL_Q_MQ;
}
- dev->blocksize = round_down(dev->blocksize, 512);
- dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
-
if (dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
dev->submit_queues = nr_online_nodes;
@@ -1876,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
+ .dma_alignment = 1,
};
struct nullb *nullb;
@@ -1912,6 +1986,16 @@ static int null_add_dev(struct nullb_device *dev)
goto out_cleanup_tags;
}
+ if (dev->cache_size > 0) {
+ set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (dev->fua)
+ lim.features |= BLK_FEAT_FUA;
+ }
+
+ if (dev->rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
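
Write cache, FUA and rotational properties now travel in the queue_limits handed to blk_mq_alloc_disk() (BLK_FEAT_WRITE_CACHE, BLK_FEAT_FUA, BLK_FEAT_ROTATIONAL) instead of being toggled after allocation with blk_queue_write_cache() and queue flags, which is why those calls vanish from the next hunk. The shape of the new setup, sketched with placeholder names (set, driver_data):

	struct queue_limits lim = {
		.logical_block_size	= 512,
		.physical_block_size	= 512,
		.dma_alignment		= 1,	/* as in the hunk above */
		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
	};
	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&set, &lim, driver_data);
	if (IS_ERR(disk))
		return PTR_ERR(disk);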
@@ -1924,23 +2008,14 @@ static int null_add_dev(struct nullb_device *dev)
nullb_setup_bwtimer(nullb);
}
- if (dev->cache_size > 0) {
- set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
- blk_queue_write_cache(nullb->q, true, dev->fua);
- }
-
nullb->q->queuedata = nullb;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
- mutex_lock(&lock);
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
- if (rv < 0) {
- mutex_unlock(&lock);
+ if (rv < 0)
goto out_cleanup_disk;
- }
+
nullb->index = rv;
dev->index = rv;
- mutex_unlock(&lock);
if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
@@ -1957,7 +2032,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb->disk->minors = 1;
nullb->disk->fops = &null_ops;
nullb->disk->private_data = nullb;
- strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ strscpy(nullb->disk->disk_name, nullb->disk_name);
if (nullb->dev->zoned) {
rv = null_register_zoned_dev(nullb);
@@ -1969,9 +2044,7 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_ida_free;
- mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
- mutex_unlock(&lock);
pr_info("disk %s created\n", nullb->disk_name);
@@ -2020,7 +2093,9 @@ static int null_create_dev(void)
if (!dev)
return -ENOMEM;
+ mutex_lock(&lock);
ret = null_add_dev(dev);
+ mutex_unlock(&lock);
if (ret) {
null_free_dev(dev);
return ret;
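
The global mutex is hoisted out of null_add_dev() and into its callers, as the null_create_dev() hunk above shows, so IDA allocation, disk setup and list insertion all run inside one critical section. A sketch of how the callee side could document the new contract (hypothetical; the actual patch simply drops the internal lock/unlock pairs):

static int null_add_dev_sketch(struct nullb_device *dev)
{
	lockdep_assert_held(&lock);	/* callers now hold the global lock */

	/* ida_alloc(), disk setup and list_add_tail() run here, serialized
	 * by the caller-held mutex rather than by locking piecemeal. */
	return 0;
}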
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 3234e6c85eed..6c4c4bbe7dad 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -63,6 +63,8 @@ struct nullb_device {
unsigned long flags; /* device flags */
unsigned int curr_cache;
struct badblocks badblocks;
+ bool badblocks_once;
+ bool badblocks_partial_io;
unsigned int nr_zones;
unsigned int nr_zones_imp_open;
@@ -101,11 +103,13 @@ struct nullb_device {
bool memory_backed; /* if data is stored in memory */
bool discard; /* if support discard */
bool zoned; /* if device is zoned */
+ bool zone_full; /* Initialize zones to be full */
bool virt_boundary; /* virtual boundary on/off for the device */
bool no_sched; /* no IO scheduler for the device */
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
bool fua; /* Support FUA */
+ bool rotational; /* Fake rotational device */
};
struct nullb {
@@ -129,13 +133,18 @@ blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors);
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index ef2d05d5f0df..82b8f6a5e5f0 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -36,7 +36,12 @@ TRACE_EVENT(nullb_zone_op,
TP_ARGS(cmd, zone_no, zone_cond),
TP_STRUCT__entry(
__array(char, disk, DISK_NAME_LEN)
- __field(enum req_op, op)
+ /*
+ * __field() uses is_signed_type(). is_signed_type() does not
+ * support bitwise types. Use __field_struct() instead because
+ * it does not use is_signed_type().
+ */
+ __field_struct(enum req_op, op)
__field(unsigned int, zone_no)
__field(unsigned int, zone_cond)
),
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 5b5a63adacc1..0ada35dc0989 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -74,6 +74,17 @@ int null_init_zoned_dev(struct nullb_device *dev,
return -EINVAL;
}
+ /*
+ * If a smaller zone capacity was requested, do not allow a smaller last
+ * zone at the same time as such zone configuration does not correspond
+ * to any real zoned device.
+ */
+ if (dev->zone_capacity != dev->zone_size &&
+ dev->size & (dev->zone_size - 1)) {
+ pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
+ return -EINVAL;
+ }
+
zone_capacity_sects = mb_to_sects(dev->zone_capacity);
dev_capacity_sects = mb_to_sects(dev->size);
dev->zone_size_sects = mb_to_sects(dev->zone_size);
@@ -108,7 +119,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
- dev->nr_zones);
+ dev->zone_max_open);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
@@ -134,7 +145,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
zone = &dev->zones[i];
null_init_zone_lock(dev, zone);
- zone->start = zone->wp = sector;
+ zone->start = sector;
if (zone->start + dev->zone_size_sects > dev_capacity_sects)
zone->len = dev_capacity_sects - zone->start;
else
@@ -142,14 +153,20 @@ int null_init_zoned_dev(struct nullb_device *dev,
zone->capacity =
min_t(sector_t, zone->len, zone_capacity_sects);
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
- zone->cond = BLK_ZONE_COND_EMPTY;
+ if (dev->zone_full) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->capacity;
+ } else {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ }
sector += dev->zone_size_sects;
}
- lim->zoned = true;
+ lim->features |= BLK_FEAT_ZONED;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_append_max_sectors;
+ lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
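
Two things happen in this zoned-init hunk: a configuration that combines a reduced zone capacity with a smaller last zone is rejected up front, and the new zone_full option initializes every zone as FULL with the write pointer at start + capacity. A worked instance of the rejected configuration, with hypothetical numbers:

/*
 * zone_size = 256 MiB (a power of two), zone_capacity = 192 MiB,
 * size = 1000 MiB.  1000 & (256 - 1) == 232, so the last zone would
 * be short; combined with zone_capacity != zone_size this now fails
 * with -EINVAL, since no real zoned device mixes both properties.
 */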
@@ -160,9 +177,6 @@ int null_register_zoned_dev(struct nullb *nullb)
struct request_queue *q = nullb->q;
struct gendisk *disk = nullb->disk;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- disk->nr_zones = bdev_nr_zones(disk->part0);
-
pr_info("%s: using %s zone append\n",
disk->disk_name,
queue_emulates_zone_append(q) ? "emulated" : "native");
@@ -177,7 +191,7 @@ void null_free_zoned_dev(struct nullb_device *dev)
}
int null_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
@@ -211,7 +225,7 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
blkz.capacity = zone->capacity;
null_unlock_zone(dev, zone);
- error = cb(&blkz, i, data);
+ error = disk_report_zone(disk, &blkz, i, args);
if (error)
return error;
}
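
The report_zones path no longer hands drivers a raw callback plus cookie; the gendisk method now receives an opaque struct blk_report_zones_args and forwards each zone through disk_report_zone(), as the hunk above does. A sketch of the resulting driver loop (fill_zone() is a placeholder for reading the driver's own zone state):

static int report_zones_sketch(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones,
			       struct blk_report_zones_args *args)
{
	struct blk_zone blkz = {};
	unsigned int i;
	int error;

	for (i = 0; i < nr_zones; i++) {
		fill_zone(&blkz, sector);	/* placeholder */
		error = disk_report_zone(disk, &blkz, i, args);
		if (error)
			return error;
		sector += blkz.len;
	}
	return nr_zones;
}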
@@ -228,7 +242,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
{
struct nullb_device *dev = nullb->dev;
struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
- unsigned int nr_sectors = len >> SECTOR_SHIFT;
+ unsigned int nr_sectors = DIV_ROUND_UP(len, SECTOR_SIZE);
/* Read must be below the write pointer position */
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
@@ -339,6 +353,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct nullb_zone *zone = &dev->zones[zno];
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
trace_nullb_zone_op(cmd, zno, zone->cond);
@@ -398,9 +413,20 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_IMP_OPEN;
}
- ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
- if (ret != BLK_STS_OK)
- goto unlock_zone;
+ if (dev->badblocks.shift != -1) {
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+ if (badblocks_ret != BLK_STS_OK && !nr_sectors) {
+ ret = badblocks_ret;
+ goto unlock_zone;
+ }
+ }
+
+ if (dev->memory_backed) {
+ ret = null_handle_memory_backed(cmd, REQ_OP_WRITE, sector,
+ nr_sectors);
+ if (ret != BLK_STS_OK)
+ goto unlock_zone;
+ }
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
@@ -415,7 +441,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_FULL;
}
- ret = BLK_STS_OK;
+ ret = badblocks_ret;
unlock_zone:
null_unlock_zone(dev, zone);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
deleted file mode 100644
index 21728e9ea5c3..000000000000
--- a/drivers/block/pktcdvd.c
+++ /dev/null
@@ -1,2923 +0,0 @@
-/*
- * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
- * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
- * DVD-RAM devices.
- *
- * Theory of operation:
- *
- * At the lowest level, there is the standard driver for the CD/DVD device,
- * such as drivers/scsi/sr.c. This driver can handle read and write requests,
- * but it doesn't know anything about the special restrictions that apply to
- * packet writing. One restriction is that write requests must be aligned to
- * packet boundaries on the physical media, and the size of a write request
- * must be equal to the packet size. Another restriction is that a
- * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
- * command, if the previous command was a write.
- *
- * The purpose of the packet writing driver is to hide these restrictions from
- * higher layers, such as file systems, and present a block device that can be
- * randomly read and written using 2kB-sized blocks.
- *
- * The lowest layer in the packet writing driver is the packet I/O scheduler.
- * Its data is defined by the struct packet_iosched and includes two bio
- * queues with pending read and write requests. These queues are processed
- * by the pkt_iosched_process_queue() function. The write requests in this
- * queue are already properly aligned and sized. This layer is responsible for
- * issuing the flush cache commands and scheduling the I/O in a good order.
- *
- * The next layer transforms unaligned write requests to aligned writes. This
- * transformation requires reading missing pieces of data from the underlying
- * block device, assembling the pieces to full packets and queuing them to the
- * packet I/O scheduler.
- *
- * At the top layer there is a custom ->submit_bio function that forwards
- * read requests directly to the iosched queue and puts write requests in the
- * unaligned write queue. A kernel thread performs the necessary read
- * gathering to convert the unaligned writes to aligned writes and then feeds
- * them to the packet I/O scheduler.
- *
- *************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/backing-dev.h>
-#include <linux/compat.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nospec.h>
-#include <linux/pktcdvd.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_ioctl.h>
-
-#include <asm/unaligned.h>
-
-#define DRIVER_NAME "pktcdvd"
-
-#define MAX_SPEED 0xffff
-
-static DEFINE_MUTEX(pktcdvd_mutex);
-static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
-static struct proc_dir_entry *pkt_proc;
-static int pktdev_major;
-static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
-static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
-static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
-static mempool_t psd_pool;
-static struct bio_set pkt_bio_set;
-
-/* /sys/class/pktcdvd */
-static struct class class_pktcdvd;
-static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
-
-/* forward declaration */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
-static int pkt_remove_dev(dev_t pkt_dev);
-
-static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
-{
- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
-}
-
-/**********************************************************
- * sysfs interface for pktcdvd
- * by (C) 2006 Thomas Maier <balagi@justmail.de>
-
- /sys/class/pktcdvd/pktcdvd[0-7]/
- stat/reset
- stat/packets_started
- stat/packets_finished
- stat/kb_written
- stat/kb_read
- stat/kb_read_gather
- write_queue/size
- write_queue/congestion_off
- write_queue/congestion_on
- **********************************************************/
-
-static ssize_t packets_started_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
-}
-static DEVICE_ATTR_RO(packets_started);
-
-static ssize_t packets_finished_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
-}
-static DEVICE_ATTR_RO(packets_finished);
-
-static ssize_t kb_written_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
-}
-static DEVICE_ATTR_RO(kb_written);
-
-static ssize_t kb_read_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
-}
-static DEVICE_ATTR_RO(kb_read);
-
-static ssize_t kb_read_gather_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
-}
-static DEVICE_ATTR_RO(kb_read_gather);
-
-static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- if (len > 0) {
- pd->stats.pkt_started = 0;
- pd->stats.pkt_ended = 0;
- pd->stats.secs_w = 0;
- pd->stats.secs_rg = 0;
- pd->stats.secs_r = 0;
- }
- return len;
-}
-static DEVICE_ATTR_WO(reset);
-
-static struct attribute *pkt_stat_attrs[] = {
- &dev_attr_packets_finished.attr,
- &dev_attr_packets_started.attr,
- &dev_attr_kb_read.attr,
- &dev_attr_kb_written.attr,
- &dev_attr_kb_read_gather.attr,
- &dev_attr_reset.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_stat_group = {
- .name = "stat",
- .attrs = pkt_stat_attrs,
-};
-
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
- spin_unlock(&pd->lock);
- return n;
-}
-static DEVICE_ATTR_RO(size);
-
-static void init_write_congestion_marks(int* lo, int* hi)
-{
- if (*hi > 0) {
- *hi = max(*hi, 500);
- *hi = min(*hi, 1000000);
- if (*lo <= 0)
- *lo = *hi - 100;
- else {
- *lo = min(*lo, *hi - 100);
- *lo = max(*lo, 100);
- }
- } else {
- *hi = -1;
- *lo = -1;
- }
-}
-
-static ssize_t congestion_off_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_off_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_off = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_off);
-
-static ssize_t congestion_on_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_on = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_on);
-
-static struct attribute *pkt_wq_attrs[] = {
- &dev_attr_congestion_on.attr,
- &dev_attr_congestion_off.attr,
- &dev_attr_size.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_wq_group = {
- .name = "write_queue",
- .attrs = pkt_wq_attrs,
-};
-
-static const struct attribute_group *pkt_groups[] = {
- &pkt_stat_group,
- &pkt_wq_group,
- NULL,
-};
-
-static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd)) {
- pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
- MKDEV(0, 0), pd, pkt_groups,
- "%s", pd->disk->disk_name);
- if (IS_ERR(pd->dev))
- pd->dev = NULL;
- }
-}
-
-static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd))
- device_unregister(pd->dev);
-}
-
-
-/********************************************************************
- /sys/class/pktcdvd/
- add map block device
- remove unmap packet dev
- device_map show mappings
- *******************************************************************/
-
-static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
- char *data)
-{
- int n = 0;
- int idx;
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- struct pktcdvd_device *pd = pkt_devs[idx];
- if (!pd)
- continue;
- n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
- pd->disk->disk_name,
- MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(file_bdev(pd->bdev_file)->bd_dev),
- MINOR(file_bdev(pd->bdev_file)->bd_dev));
- }
- mutex_unlock(&ctl_mutex);
- return n;
-}
-static CLASS_ATTR_RO(device_map);
-
-static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
-
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- /* pkt_setup_dev() expects caller to hold reference to self */
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- pkt_setup_dev(MKDEV(major, minor), NULL);
-
- module_put(THIS_MODULE);
-
- return count;
- }
-
- return -EINVAL;
-}
-static CLASS_ATTR_WO(add);
-
-static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- pkt_remove_dev(MKDEV(major, minor));
- return count;
- }
- return -EINVAL;
-}
-static CLASS_ATTR_WO(remove);
-
-static struct attribute *class_pktcdvd_attrs[] = {
- &class_attr_add.attr,
- &class_attr_remove.attr,
- &class_attr_device_map.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(class_pktcdvd);
-
-static struct class class_pktcdvd = {
- .name = DRIVER_NAME,
- .class_groups = class_pktcdvd_groups,
-};
-
-static int pkt_sysfs_init(void)
-{
- /*
- * create control files in sysfs
- * /sys/class/pktcdvd/...
- */
- return class_register(&class_pktcdvd);
-}
-
-static void pkt_sysfs_cleanup(void)
-{
- class_unregister(&class_pktcdvd);
-}
-
-/********************************************************************
- entries in debugfs
-
- /sys/kernel/debug/pktcdvd[0-7]/
- info
-
- *******************************************************************/
-
-static void pkt_count_states(struct pktcdvd_device *pd, int *states)
-{
- struct packet_data *pkt;
- int i;
-
- for (i = 0; i < PACKET_NUM_STATES; i++)
- states[i] = 0;
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- states[pkt->state]++;
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-static int pkt_seq_show(struct seq_file *m, void *p)
-{
- struct pktcdvd_device *pd = m->private;
- char *msg;
- int states[PACKET_NUM_STATES];
-
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
- file_bdev(pd->bdev_file));
-
- seq_printf(m, "\nSettings:\n");
- seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
-
- if (pd->settings.write_type == 0)
- msg = "Packet";
- else
- msg = "Unknown";
- seq_printf(m, "\twrite type:\t\t%s\n", msg);
-
- seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
- seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
-
- seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
-
- if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
- msg = "Mode 1";
- else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
- msg = "Mode 2";
- else
- msg = "Unknown";
- seq_printf(m, "\tblock mode:\t\t%s\n", msg);
-
- seq_printf(m, "\nStatistics:\n");
- seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
- seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
- seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
- seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
- seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
-
- seq_printf(m, "\nMisc:\n");
- seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
- seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
- seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
- seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
- seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
- seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
-
- seq_printf(m, "\nQueue state:\n");
- seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
- seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
- seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
-
- pkt_count_states(pd, states);
- seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
- pd->write_congestion_off,
- pd->write_congestion_on);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(pkt_seq);
-
-static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
- if (!pd->dfs_d_root)
- return;
-
- pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
- pd, &pkt_seq_fops);
-}
-
-static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- debugfs_remove(pd->dfs_f_info);
- debugfs_remove(pd->dfs_d_root);
- pd->dfs_f_info = NULL;
- pd->dfs_d_root = NULL;
-}
-
-static void pkt_debugfs_init(void)
-{
- pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
-}
-
-static void pkt_debugfs_cleanup(void)
-{
- debugfs_remove(pkt_debugfs_root);
- pkt_debugfs_root = NULL;
-}
-
-/* ----------------------------------------------------------*/
-
-
-static void pkt_bio_finished(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
- if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- dev_dbg(ddev, "queue empty\n");
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
- }
-}
-
-/*
- * Allocate a packet_data struct
- */
-static struct packet_data *pkt_alloc_packet_data(int frames)
-{
- int i;
- struct packet_data *pkt;
-
- pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
- if (!pkt)
- goto no_pkt;
-
- pkt->frames = frames;
- pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
- if (!pkt->w_bio)
- goto no_bio;
-
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
- pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!pkt->pages[i])
- goto no_page;
- }
-
- spin_lock_init(&pkt->lock);
- bio_list_init(&pkt->orig_bios);
-
- for (i = 0; i < frames; i++) {
- pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
- if (!pkt->r_bios[i])
- goto no_rd_bio;
- }
-
- return pkt;
-
-no_rd_bio:
- for (i = 0; i < frames; i++)
- kfree(pkt->r_bios[i]);
-no_page:
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
- if (pkt->pages[i])
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
-no_bio:
- kfree(pkt);
-no_pkt:
- return NULL;
-}
-
-/*
- * Free a packet_data struct
- */
-static void pkt_free_packet_data(struct packet_data *pkt)
-{
- int i;
-
- for (i = 0; i < pkt->frames; i++)
- kfree(pkt->r_bios[i]);
- for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
- kfree(pkt);
-}
-
-static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
-{
- struct packet_data *pkt, *next;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
-
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
- pkt_free_packet_data(pkt);
- }
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
-}
-
-static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
-{
- struct packet_data *pkt;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
-
- while (nr_packets > 0) {
- pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
- if (!pkt) {
- pkt_shrink_pktlist(pd);
- return 0;
- }
- pkt->id = nr_packets;
- pkt->pd = pd;
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- nr_packets--;
- }
- return 1;
-}
-
-static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
-{
- struct rb_node *n = rb_next(&node->rb_node);
- if (!n)
- return NULL;
- return rb_entry(n, struct pkt_rb_node, rb_node);
-}
-
-static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- rb_erase(&node->rb_node, &pd->bio_queue);
- mempool_free(node, &pd->rb_pool);
- pd->bio_queue_size--;
- BUG_ON(pd->bio_queue_size < 0);
-}
-
-/*
- * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
- */
-static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
-{
- struct rb_node *n = pd->bio_queue.rb_node;
- struct rb_node *next;
- struct pkt_rb_node *tmp;
-
- if (!n) {
- BUG_ON(pd->bio_queue_size > 0);
- return NULL;
- }
-
- for (;;) {
- tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_iter.bi_sector)
- next = n->rb_left;
- else
- next = n->rb_right;
- if (!next)
- break;
- n = next;
- }
-
- if (s > tmp->bio->bi_iter.bi_sector) {
- tmp = pkt_rbtree_next(tmp);
- if (!tmp)
- return NULL;
- }
- BUG_ON(s > tmp->bio->bi_iter.bi_sector);
- return tmp;
-}
-
-/*
- * Insert a node into the pd->bio_queue rb tree.
- */
-static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- struct rb_node **p = &pd->bio_queue.rb_node;
- struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_iter.bi_sector;
- struct pkt_rb_node *tmp;
-
- while (*p) {
- parent = *p;
- tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_iter.bi_sector)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, &pd->bio_queue);
- pd->bio_queue_size++;
-}
-
-/*
- * Send a packet_command to the underlying block device and
- * wait for completion.
- */
-static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
- struct scsi_cmnd *scmd;
- struct request *rq;
- int ret = 0;
-
- rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- scmd = blk_mq_rq_to_pdu(rq);
-
- if (cgc->buflen) {
- ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
- GFP_NOIO);
- if (ret)
- goto out;
- }
-
- scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
-
- rq->timeout = 60*HZ;
- if (cgc->quiet)
- rq->rq_flags |= RQF_QUIET;
-
- blk_execute_rq(rq, false);
- if (scmd->result)
- ret = -EIO;
-out:
- blk_mq_free_request(rq);
- return ret;
-}
-
-static const char *sense_key_string(__u8 index)
-{
- static const char * const info[] = {
- "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check",
- };
-
- return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
-}
-
-/*
- * A generic sense dump / resolve mechanism should be implemented across
- * all ATAPI + SCSI devices.
- */
-static void pkt_dump_sense(struct pktcdvd_device *pd,
- struct packet_command *cgc)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct scsi_sense_hdr *sshdr = cgc->sshdr;
-
- if (sshdr)
- dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
- CDROM_PACKET_SIZE, cgc->cmd,
- sshdr->sense_key, sshdr->asc, sshdr->ascq,
- sense_key_string(sshdr->sense_key));
- else
- dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
-}
-
-/*
- * flush the drive cache to media
- */
-static int pkt_flush_cache(struct pktcdvd_device *pd)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_FLUSH_CACHE;
- cgc.quiet = 1;
-
- /*
- * the IMMED bit -- we default to not setting it, although that
- * would allow a much faster close, this is safer
- */
-#if 0
- cgc.cmd[1] = 1 << 1;
-#endif
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * speed is given as the normal factor, e.g. 4 for 4x
- */
-static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
- unsigned write_speed, unsigned read_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_SET_SPEED;
- put_unaligned_be16(read_speed, &cgc.cmd[2]);
- put_unaligned_be16(write_speed, &cgc.cmd[4]);
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
-
- return ret;
-}
-
-/*
- * Queue a bio for processing by the low-level CD device. Must be called
- * from process context.
- */
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
-{
- /*
- * Some CDRW drives can not handle writes larger than one packet,
- * even if the size is a multiple of the packet size.
- */
- bio->bi_opf |= REQ_NOMERGE;
-
- spin_lock(&pd->iosched.lock);
- if (bio_data_dir(bio) == READ)
- bio_list_add(&pd->iosched.read_queue, bio);
- else
- bio_list_add(&pd->iosched.write_queue, bio);
- spin_unlock(&pd->iosched.lock);
-
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Process the queued read/write requests. This function handles special
- * requirements for CDRW drives:
- * - A cache flush command must be inserted before a read request if the
- * previous request was a write.
- * - Switching between reading and writing is slow, so don't do it more often
- * than necessary.
- * - Optimize for throughput at the expense of latency. This means that streaming
- * writes will never be interrupted by a read, but if the drive has to seek
- * before the next write, switch to reading instead if there are any pending
- * read requests.
- * - Set the read speed according to current usage pattern. When only reading
- * from the device, it's best to use the highest possible read speed, but
- * when switching often between reading and writing, it's better to have the
- * same read and write speeds.
- */
-static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (atomic_read(&pd->iosched.attention) == 0)
- return;
- atomic_set(&pd->iosched.attention, 0);
-
- for (;;) {
- struct bio *bio;
- int reads_queued, writes_queued;
-
- spin_lock(&pd->iosched.lock);
- reads_queued = !bio_list_empty(&pd->iosched.read_queue);
- writes_queued = !bio_list_empty(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!reads_queued && !writes_queued)
- break;
-
- if (pd->iosched.writing) {
- int need_write_seek = 1;
- spin_lock(&pd->iosched.lock);
- bio = bio_list_peek(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_iter.bi_sector ==
- pd->iosched.last_write))
- need_write_seek = 0;
- if (need_write_seek && reads_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "write, waiting\n");
- break;
- }
- pkt_flush_cache(pd);
- pd->iosched.writing = 0;
- }
- } else {
- if (!reads_queued && writes_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "read, waiting\n");
- break;
- }
- pd->iosched.writing = 1;
- }
- }
-
- spin_lock(&pd->iosched.lock);
- if (pd->iosched.writing)
- bio = bio_list_pop(&pd->iosched.write_queue);
- else
- bio = bio_list_pop(&pd->iosched.read_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!bio)
- continue;
-
- if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads +=
- bio->bi_iter.bi_size >> 10;
- else {
- pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio_end_sector(bio);
- }
- if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
- if (pd->read_speed == pd->write_speed) {
- pd->read_speed = MAX_SPEED;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- } else {
- if (pd->read_speed != pd->write_speed) {
- pd->read_speed = pd->write_speed;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- }
-
- atomic_inc(&pd->cdrw.pending_bios);
- submit_bio_noacct(bio);
- }
-}
-
-/*
- * Special care is needed if the underlying block device has a small
- * max_phys_segments value.
- */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
- /*
- * The cdrom device can handle one segment/frame
- */
- clear_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
- /*
- * We can handle this case at the expense of some extra memory
- * copies during write operations
- */
- set_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- dev_err(ddev, "cdrom max_phys_segments too small\n");
- return -EIO;
-}
-
-static void pkt_end_io_read(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
- bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
-
- if (bio->bi_status)
- atomic_inc(&pkt->io_errors);
- bio_uninit(bio);
- if (atomic_dec_and_test(&pkt->io_wait)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- pkt_bio_finished(pd);
-}
-
-static void pkt_end_io_packet_write(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
-
- pd->stats.pkt_ended++;
-
- bio_uninit(bio);
- pkt_bio_finished(pd);
- atomic_dec(&pkt->io_wait);
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Schedule reads for the holes in a packet
- */
-static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int frames_read = 0;
- struct bio *bio;
- int f;
- char written[PACKET_MAX_SIZE];
-
- BUG_ON(bio_list_empty(&pkt->orig_bios));
-
- atomic_set(&pkt->io_wait, 0);
- atomic_set(&pkt->io_errors, 0);
-
- /*
- * Figure out which frames we need to read before we can write.
- */
- memset(written, 0, sizeof(written));
- spin_lock(&pkt->lock);
- bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
- (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
- pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
- BUG_ON(first_frame < 0);
- BUG_ON(first_frame + num_frames > pkt->frames);
- for (f = first_frame; f < first_frame + num_frames; f++)
- written[f] = 1;
- }
- spin_unlock(&pkt->lock);
-
- if (pkt->cache_valid) {
- dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
- goto out_account;
- }
-
- /*
- * Schedule reads for missing parts of the packet.
- */
- for (f = 0; f < pkt->frames; f++) {
- int p, offset;
-
- if (written[f])
- continue;
-
- bio = pkt->r_bios[f];
- bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
- REQ_OP_READ);
- bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_end_io = pkt_end_io_read;
- bio->bi_private = pkt;
-
- p = (f * CD_FRAMESIZE) / PAGE_SIZE;
- offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
- pkt->pages[p], offset);
- if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
- BUG();
-
- atomic_inc(&pkt->io_wait);
- pkt_queue_bio(pd, bio);
- frames_read++;
- }
-
-out_account:
- dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
- pd->stats.pkt_started++;
- pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
-}
-
-/*
- * Find a packet matching zone, or the least recently used packet if
- * there is no match.
- */
-static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
-{
- struct packet_data *pkt;
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
- if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
- list_del_init(&pkt->list);
- if (pkt->sector != zone)
- pkt->cache_valid = 0;
- return pkt;
- }
- }
- BUG();
- return NULL;
-}
-
-static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- if (pkt->cache_valid) {
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- } else {
- list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
- }
-}
-
-static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
- enum packet_data_state state)
-{
- static const char *state_name[] = {
- "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
- };
- enum packet_data_state old_state = pkt->state;
-
- dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
- pkt->id, pkt->sector, state_name[old_state], state_name[state]);
-
- pkt->state = state;
-}
-
-/*
- * Scan the work queue to see if we can start a new packet.
- * returns non-zero if any work was done.
- */
-static int pkt_handle_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *p;
- struct bio *bio = NULL;
- sector_t zone = 0; /* Suppress gcc warning */
- struct pkt_rb_node *node, *first_node;
- struct rb_node *n;
-
- atomic_set(&pd->scan_queue, 0);
-
- if (list_empty(&pd->cdrw.pkt_free_list)) {
- dev_dbg(ddev, "no pkt\n");
- return 0;
- }
-
- /*
- * Try to find a zone we are not already working on.
- */
- spin_lock(&pd->lock);
- first_node = pkt_rbtree_find(pd, pd->current_sector);
- if (!first_node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- first_node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- node = first_node;
- while (node) {
- bio = node->bio;
- zone = get_zone(bio->bi_iter.bi_sector, pd);
- list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
- if (p->sector == zone) {
- bio = NULL;
- goto try_next_bio;
- }
- }
- break;
-try_next_bio:
- node = pkt_rbtree_next(node);
- if (!node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- if (node == first_node)
- node = NULL;
- }
- spin_unlock(&pd->lock);
- if (!bio) {
- dev_dbg(ddev, "no bio\n");
- return 0;
- }
-
- pkt = pkt_get_packet_data(pd, zone);
-
- pd->current_sector = zone + pd->settings.size;
- pkt->sector = zone;
- BUG_ON(pkt->frames != pd->settings.size >> 2);
- pkt->write_size = 0;
-
- /*
- * Scan work queue for bios in the same zone and link them
- * to this packet.
- */
- spin_lock(&pd->lock);
- dev_dbg(ddev, "looking for zone %llx\n", zone);
- while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
- sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
-
- bio = node->bio;
- dev_dbg(ddev, "found zone=%llx\n", tmp);
- if (tmp != zone)
- break;
- pkt_rbtree_erase(pd, node);
- spin_lock(&pkt->lock);
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
- spin_unlock(&pkt->lock);
- }
- /* check write congestion marks, and if bio_queue_size is
- * below, wake up any waiters
- */
- if (pd->congested &&
- pd->bio_queue_size <= pd->write_congestion_off) {
- pd->congested = false;
- wake_up_var(&pd->congested);
- }
- spin_unlock(&pd->lock);
-
- pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
- pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
- atomic_set(&pkt->run_sm, 1);
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_add(&pkt->list, &pd->cdrw.pkt_active_list);
- spin_unlock(&pd->cdrw.active_list_lock);
-
- return 1;
-}
-
-/**
- * bio_list_copy_data - copy contents of data buffers from one chain of bios to
- * another
- * @src: source bio list
- * @dst: destination bio list
- *
- * Stops when it reaches the end of either the @src list or @dst list - that is,
- * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
- * bios).
- */
-static void bio_list_copy_data(struct bio *dst, struct bio *src)
-{
- struct bvec_iter src_iter = src->bi_iter;
- struct bvec_iter dst_iter = dst->bi_iter;
-
- while (1) {
- if (!src_iter.bi_size) {
- src = src->bi_next;
- if (!src)
- break;
-
- src_iter = src->bi_iter;
- }
-
- if (!dst_iter.bi_size) {
- dst = dst->bi_next;
- if (!dst)
- break;
-
- dst_iter = dst->bi_iter;
- }
-
- bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
- }
-}
-
-/*
- * Assemble a bio to write one packet and queue the bio for processing
- * by the underlying block device.
- */
-static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int f;
-
- bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
- pkt->frames, REQ_OP_WRITE);
- pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
- pkt->w_bio->bi_private = pkt;
-
- /* XXX: locking? */
- for (f = 0; f < pkt->frames; f++) {
- struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-
- if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
- BUG();
- }
- dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
- /*
- * Fill-in bvec with data from orig_bios.
- */
- spin_lock(&pkt->lock);
- bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
-
- pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
- spin_unlock(&pkt->lock);
-
- dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
-
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
- pkt->cache_valid = 1;
- else
- pkt->cache_valid = 0;
-
- /* Start the write request */
- atomic_set(&pkt->io_wait, 1);
- pkt_queue_bio(pd, pkt->w_bio);
-}
-
-static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
-{
- struct bio *bio;
-
- if (status)
- pkt->cache_valid = 0;
-
- /* Finish all bios corresponding to this packet */
- while ((bio = bio_list_pop(&pkt->orig_bios))) {
- bio->bi_status = status;
- bio_endio(bio);
- }
-}
-
-static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- dev_dbg(ddev, "pkt %d\n", pkt->id);
-
- for (;;) {
- switch (pkt->state) {
- case PACKET_WAITING_STATE:
- if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
- return;
-
- pkt->sleep_time = 0;
- pkt_gather_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
- break;
-
- case PACKET_READ_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (atomic_read(&pkt->io_errors) > 0) {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- } else {
- pkt_start_write(pd, pkt);
- }
- break;
-
- case PACKET_WRITE_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (!pkt->w_bio->bi_status) {
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- } else {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- }
- break;
-
- case PACKET_RECOVERY_STATE:
- dev_dbg(ddev, "No recovery possible\n");
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- break;
-
- case PACKET_FINISHED_STATE:
- pkt_finish_packet(pkt, pkt->w_bio->bi_status);
- return;
-
- default:
- BUG();
- break;
- }
- }
-}
-
-static void pkt_handle_packets(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *next;
-
- /*
- * Run state machine for active packets
- */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0) {
- atomic_set(&pkt->run_sm, 0);
- pkt_run_state_machine(pd, pkt);
- }
- }
-
- /*
- * Move no longer active packets to the free list
- */
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
- if (pkt->state == PACKET_FINISHED_STATE) {
- list_del(&pkt->list);
- pkt_put_packet_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
- atomic_set(&pd->scan_queue, 1);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-/*
- * kcdrwd is woken up when writes have been queued for one of our
- * registered devices
- */
-static int kcdrwd(void *foobar)
-{
- struct pktcdvd_device *pd = foobar;
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt;
- int states[PACKET_NUM_STATES];
- long min_sleep_time, residue;
-
- set_user_nice(current, MIN_NICE);
- set_freezable();
-
- for (;;) {
- DECLARE_WAITQUEUE(wait, current);
-
- /*
- * Wait until there is something to do
- */
- add_wait_queue(&pd->wqueue, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* Check if we need to run pkt_handle_queue */
- if (atomic_read(&pd->scan_queue) > 0)
- goto work_to_do;
-
- /* Check if we need to run the state machine for some packet */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0)
- goto work_to_do;
- }
-
- /* Check if we need to process the iosched queues */
- if (atomic_read(&pd->iosched.attention) != 0)
- goto work_to_do;
-
- /* Otherwise, go to sleep */
- pkt_count_states(pd, states);
- dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- min_sleep_time = MAX_SCHEDULE_TIMEOUT;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
- min_sleep_time = pkt->sleep_time;
- }
-
- dev_dbg(ddev, "sleeping\n");
- residue = schedule_timeout(min_sleep_time);
- dev_dbg(ddev, "wake up\n");
-
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (!pkt->sleep_time)
- continue;
- pkt->sleep_time -= min_sleep_time - residue;
- if (pkt->sleep_time <= 0) {
- pkt->sleep_time = 0;
- atomic_inc(&pkt->run_sm);
- }
- }
-
- if (kthread_should_stop())
- break;
- }
-work_to_do:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&pd->wqueue, &wait);
-
- if (kthread_should_stop())
- break;
-
- /*
- * if pkt_handle_queue returns true, we can queue
- * another request.
- */
- while (pkt_handle_queue(pd))
- ;
-
- /*
- * Handle packet state machine
- */
- pkt_handle_packets(pd);
-
- /*
- * Handle iosched queues
- */
- pkt_iosched_process_queue(pd);
- }
-
- return 0;
-}
-
-static void pkt_print_settings(struct pktcdvd_device *pd)
-{
- dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
- pd->settings.fp ? "Fixed" : "Variable",
- pd->settings.size >> 2,
- pd->settings.block_mode == 8 ? '1' : '2');
-}
-
-static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
-
- cgc->cmd[0] = GPCMD_MODE_SENSE_10;
- cgc->cmd[2] = page_code | (page_control << 6);
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_READ;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
- memset(cgc->buffer, 0, 2);
- cgc->cmd[0] = GPCMD_MODE_SELECT_10;
- cgc->cmd[1] = 0x10; /* PF */
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_WRITE;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
-{
- struct packet_command cgc;
- int ret;
-
- /* set up command and get the disc info */
- init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DISC_INFO;
- cgc.cmd[8] = cgc.buflen = 2;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- /* not all drives have the same disc_info length, so requeue
- * packet with the length the drive tells us it can supply
- */
- cgc.buflen = be16_to_cpu(di->disc_information_length) +
- sizeof(di->disc_information_length);
-
- if (cgc.buflen > sizeof(disc_information))
- cgc.buflen = sizeof(disc_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
-{
- struct packet_command cgc;
- int ret;
-
- init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
- cgc.cmd[1] = type & 3;
- put_unaligned_be16(track, &cgc.cmd[4]);
- cgc.cmd[8] = 8;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- cgc.buflen = be16_to_cpu(ti->track_information_length) +
- sizeof(ti->track_information_length);
-
- if (cgc.buflen > sizeof(track_information))
- cgc.buflen = sizeof(track_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
- long *last_written)
-{
- disc_information di;
- track_information ti;
- __u32 last_track;
- int ret;
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret)
- return ret;
-
- last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
-
- /* if this track is blank, try the previous. */
- if (ti.blank) {
- last_track--;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
- }
-
- /* if last recorded field is valid, return it. */
- if (ti.lra_v) {
- *last_written = be32_to_cpu(ti.last_rec_address);
- } else {
- /* make it up instead */
- *last_written = be32_to_cpu(ti.track_start) +
- be32_to_cpu(ti.track_size);
- if (ti.free_blocks)
- *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
- }
- return 0;
-}
-
-/*
- * write mode select package based on pd->settings
- */
-static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- write_param_page *wp;
- char buffer[128];
- int ret, size;
-
- /* doesn't apply to DVD+RW or DVD-RAM */
- if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
- return 0;
-
- memset(buffer, 0, sizeof(buffer));
- init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- size = 2 + get_unaligned_be16(&buffer[0]);
- pd->mode_offset = get_unaligned_be16(&buffer[6]);
- if (size > sizeof(buffer))
- size = sizeof(buffer);
-
- /*
- * now get it all
- */
- init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- /*
- * write page is offset header + block descriptor length
- */
- wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
-
- wp->fp = pd->settings.fp;
- wp->track_mode = pd->settings.track_mode;
- wp->write_type = pd->settings.write_type;
- wp->data_block_type = pd->settings.block_mode;
-
- wp->multi_session = 0;
-
-#ifdef PACKET_USE_LS
- wp->link_size = 7;
- wp->ls_v = 1;
-#endif
-
- if (wp->data_block_type == PACKET_BLOCK_MODE1) {
- wp->session_format = 0;
- wp->subhdr2 = 0x20;
- } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
- wp->session_format = 0x20;
- wp->subhdr2 = 8;
-#if 0
- wp->mcn[0] = 0x80;
- memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
-#endif
- } else {
- /*
- * paranoia
- */
- dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
- return 1;
- }
- wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
-
- cgc.buflen = cgc.cmd[8] = size;
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- pkt_print_settings(pd);
- return 0;
-}
-
-/*
- * 1 -- we can write to this track, 0 -- we can't
- */
-static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- /* The track is always writable on DVD+RW/DVD-RAM */
- return 1;
- default:
- break;
- }
-
- if (!ti->packet || !ti->fp)
- return 0;
-
- /*
- * "good" settings as per Mt Fuji.
- */
- if (ti->rt == 0 && ti->blank == 0)
- return 1;
-
- if (ti->rt == 0 && ti->blank == 1)
- return 1;
-
- if (ti->rt == 1 && ti->blank == 0)
- return 1;
-
- dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
- return 0;
-}
-
-/*
- * 1 -- we can write to this disc, 0 -- we can't
- */
-static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x0a: /* CD-RW */
- case 0xffff: /* MMC3 not supported */
- break;
- case 0x1a: /* DVD+RW */
- case 0x13: /* DVD-RW */
- case 0x12: /* DVD-RAM */
- return 1;
- default:
- dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
- return 0;
- }
-
- /*
- * for disc type 0xff we should probably reserve a new track.
- * but i'm not sure, should we leave this to user apps? probably.
- */
- if (di->disc_type == 0xff) {
- dev_notice(ddev, "unknown disc - no track?\n");
- return 0;
- }
-
- if (di->disc_type != 0x20 && di->disc_type != 0) {
- dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
- return 0;
- }
-
- if (di->erasable == 0) {
- dev_err(ddev, "disc not erasable\n");
- return 0;
- }
-
- if (di->border_status == PACKET_SESSION_RESERVED) {
- dev_err(ddev, "can't write to last track (reserved)\n");
- return 0;
- }
-
- return 1;
-}
-
-static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- unsigned char buf[12];
- disc_information di;
- track_information ti;
- int ret, track;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
- cgc.cmd[8] = 8;
- ret = pkt_generic_packet(pd, &cgc);
- pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
-
- memset(&di, 0, sizeof(disc_information));
- memset(&ti, 0, sizeof(track_information));
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret) {
- dev_err(ddev, "failed get_disc\n");
- return ret;
- }
-
- if (!pkt_writable_disc(pd, &di))
- return -EROFS;
-
- pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
-
- track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- ret = pkt_get_track_info(pd, track, 1, &ti);
- if (ret) {
- dev_err(ddev, "failed get_track\n");
- return ret;
- }
-
- if (!pkt_writable_track(pd, &ti)) {
- dev_err(ddev, "can't write to this track\n");
- return -EROFS;
- }
-
-	/*
-	 * We keep the packet size in 512-byte units; it makes request
-	 * calculations easier.
-	 */
- pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
- if (pd->settings.size == 0) {
- dev_notice(ddev, "detected zero packet size!\n");
- return -ENXIO;
- }
- if (pd->settings.size > PACKET_MAX_SECTORS) {
- dev_err(ddev, "packet size is too big\n");
- return -EROFS;
- }
- pd->settings.fp = ti.fp;
- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
-
- if (ti.nwa_v) {
- pd->nwa = be32_to_cpu(ti.next_writable);
- set_bit(PACKET_NWA_VALID, &pd->flags);
- }
-
-	/*
-	 * In theory we could use the LRA (last recorded address) on -RW
-	 * media as well and just zero blocks that haven't been written
-	 * yet, but in practice that is a no-go. We'll use it for -R,
-	 * naturally.
-	 */
- if (ti.lra_v) {
- pd->lra = be32_to_cpu(ti.last_rec_address);
- set_bit(PACKET_LRA_VALID, &pd->flags);
- } else {
- pd->lra = 0xffffffff;
- set_bit(PACKET_LRA_VALID, &pd->flags);
- }
-
- /*
- * fine for now
- */
- pd->settings.link_loss = 7;
- pd->settings.write_type = 0; /* packet */
- pd->settings.track_mode = ti.track_mode;
-
- /*
- * mode1 or mode2 disc
- */
- switch (ti.data_mode) {
- case PACKET_MODE1:
- pd->settings.block_mode = PACKET_BLOCK_MODE1;
- break;
- case PACKET_MODE2:
- pd->settings.block_mode = PACKET_BLOCK_MODE2;
- break;
- default:
- dev_err(ddev, "unknown data mode\n");
- return -EROFS;
- }
- return 0;
-}
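
The << 2 on ti.fixed_packet_size above converts drive-reported 2048-byte frames into the 512-byte sectors kept in pd->settings.size. A minimal editorial sketch of the arithmetic, with a hypothetical helper name:

/* Illustration only: 16 frames -> 16 << 2 = 64 sectors -> 32768 bytes,
 * matching the 16 * 2048 bytes the drive reported. */
static inline u32 pkt_frames_to_sectors(u32 frames)
{
	return frames << 2;
}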
-
-/*
- * enable/disable write caching on drive
- */
-static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
- int ret;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.buflen = pd->mode_offset + 12;
-
- /*
- * caching mode page might not be there, so quiet this command
- */
- cgc.quiet = 1;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
- if (ret)
- return ret;
-
- /*
- * use drive write caching -- we need deferred error handling to be
- * able to successfully recover with this option (drive will return good
- * status as soon as the cdb is validated).
- */
- buf[pd->mode_offset + 10] |= (set << 2);
-
- cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- dev_err(ddev, "write caching control failed\n");
- pkt_dump_sense(pd, &cgc);
- } else if (!ret && set)
- dev_notice(ddev, "enabled write caching\n");
- return ret;
-}
-
-static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cgc.cmd[4] = lockflag ? 1 : 0;
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * Returns drive maximum write speed
- */
-static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
- unsigned *write_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[256+18];
- unsigned char *cap_buf;
- int ret, offset;
-
- cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sshdr = &sshdr;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
- sizeof(struct mode_page_header);
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- }
-
- offset = 20; /* Obsoleted field, used by older drives */
- if (cap_buf[1] >= 28)
- offset = 28; /* Current write speed selected */
- if (cap_buf[1] >= 30) {
-		/* If the drive reports at least one "Logical Unit Write
-		 * Speed Performance Descriptor Block", use the information
-		 * in the first block, which contains the highest speed.
-		 */
- int num_spdb = get_unaligned_be16(&cap_buf[30]);
- if (num_spdb > 0)
- offset = 34;
- }
-
- *write_speed = get_unaligned_be16(&cap_buf[offset]);
- return 0;
-}
-
-/* These tables are from cdrecord; I don't have the Orange Book */
-/* standard speed CD-RW (1-4x) */
-static char clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* high speed CD-RW (-10x) */
-static char hs_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* ultra high speed CD-RW */
-static char us_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
-};
-
-/*
- * reads the maximum media speed from ATIP
- */
-static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
- unsigned *speed)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- unsigned int size, st, sp;
- int ret;
-
- init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4; /* READ ATIP */
- cgc.cmd[8] = 2;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- size = 2 + get_unaligned_be16(&buf[0]);
- if (size > sizeof(buf))
- size = sizeof(buf);
-
- init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4;
- cgc.cmd[8] = size;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- if (!(buf[6] & 0x40)) {
- dev_notice(ddev, "disc type is not CD-RW\n");
- return 1;
- }
- if (!(buf[6] & 0x4)) {
- dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n");
- return 1;
- }
-
- st = (buf[6] >> 3) & 0x7; /* disc sub-type */
-
- sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
-
- /* Info from cdrecord */
- switch (st) {
- case 0: /* standard speed */
- *speed = clv_to_speed[sp];
- break;
- case 1: /* high speed */
- *speed = hs_clv_to_speed[sp];
- break;
- case 2: /* ultra high speed */
- *speed = us_clv_to_speed[sp];
- break;
- default:
- dev_notice(ddev, "unknown disc sub-type %d\n", st);
- return 1;
- }
- if (*speed) {
- dev_info(ddev, "maximum media speed: %d\n", *speed);
- return 0;
- } else {
- dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
- return 1;
- }
-}
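
A worked example of the lookup above (editorial, values assumed): an ultra-high-speed disc reports sub-type st = 2 and A1 speed code sp = 8, so the driver reads us_clv_to_speed[8] = 24 and logs a maximum media speed of 24x.

/* Hypothetical values for illustration. */
static unsigned int pkt_atip_speed_example(void)
{
	unsigned int sp = 8;		/* A1 speed code */

	return us_clv_to_speed[sp];	/* sub-type 2 table: 24x */
}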
-
-static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- dev_dbg(ddev, "Performing OPC\n");
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.timeout = 60*HZ;
- cgc.cmd[0] = GPCMD_SEND_OPC;
- cgc.cmd[1] = 1;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
- return ret;
-}
-
-static int pkt_open_write(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- unsigned int write_speed, media_write_speed, read_speed;
-
- ret = pkt_probe_settings(pd);
- if (ret) {
- dev_dbg(ddev, "failed probe\n");
- return ret;
- }
-
- ret = pkt_set_write_settings(pd);
- if (ret) {
- dev_notice(ddev, "failed saving write settings\n");
- return -EIO;
- }
-
- pkt_write_caching(pd);
-
- ret = pkt_get_max_speed(pd, &write_speed);
- if (ret)
- write_speed = 16 * 177;
- switch (pd->mmc3_profile) {
- case 0x13: /* DVD-RW */
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- dev_notice(ddev, "write speed %ukB/s\n", write_speed);
- break;
- default:
- ret = pkt_media_speed(pd, &media_write_speed);
- if (ret)
- media_write_speed = 16;
- write_speed = min(write_speed, media_write_speed * 177);
- dev_notice(ddev, "write speed %ux\n", write_speed / 176);
- break;
- }
- read_speed = write_speed;
-
- ret = pkt_set_speed(pd, write_speed, read_speed);
- if (ret) {
- dev_notice(ddev, "couldn't set write speed\n");
- return -EIO;
- }
- pd->write_speed = write_speed;
- pd->read_speed = read_speed;
-
- ret = pkt_perform_opc(pd);
- if (ret)
- dev_notice(ddev, "Optimum Power Calibration failed\n");
-
- return 0;
-}
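
The 177 and 176 constants above come from the nominal 1x CD rate of about 176.4 kB/s: 177 rounds up when converting an x-rating to kB/s, 176 rounds down when converting back for the log message. An editorial sketch:

/* Illustration only: a 4x medium. */
static void pkt_speed_units_example(void)
{
	unsigned int kbps = 4 * 177;		/* 708 kB/s */
	unsigned int shown_x = kbps / 176;	/* 4, as printed above */

	(void)shown_x;
}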
-
-/*
- * called at open time.
- */
-static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- long lba;
- struct request_queue *q;
- struct file *bdev_file;
-
- /*
- * We need to re-open the cdrom device without O_NONBLOCK to be able
- * to read/write from/to it. It is already opened in O_NONBLOCK mode
- * so open should not fail.
- */
- bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
- BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev_file)) {
- ret = PTR_ERR(bdev_file);
- goto out;
- }
- pd->f_open_bdev = bdev_file;
-
- ret = pkt_get_last_written(pd, &lba);
- if (ret) {
- dev_err(ddev, "pkt_get_last_written failed\n");
- goto out_putdev;
- }
-
- set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
-
- q = bdev_get_queue(file_bdev(pd->bdev_file));
- if (write) {
- ret = pkt_open_write(pd);
- if (ret)
- goto out_putdev;
- set_bit(PACKET_WRITABLE, &pd->flags);
- } else {
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- clear_bit(PACKET_WRITABLE, &pd->flags);
- }
-
- ret = pkt_set_segment_merging(pd, q);
- if (ret)
- goto out_putdev;
-
- if (write) {
- if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- dev_err(ddev, "not enough memory for buffers\n");
- ret = -ENOMEM;
- goto out_putdev;
- }
- dev_info(ddev, "%lukB available on disc\n", lba << 1);
- }
-
- return 0;
-
-out_putdev:
- fput(bdev_file);
-out:
- return ret;
-}
-
-/*
- * called when the device is closed. makes sure that the device flushes
- * the internal cache before we close.
- */
-static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (flush && pkt_flush_cache(pd))
- dev_notice(ddev, "not flushing cache\n");
-
- pkt_lock_door(pd, 0);
-
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- fput(pd->f_open_bdev);
- pd->f_open_bdev = NULL;
-
- pkt_shrink_pktlist(pd);
-}
-
-static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
-{
- if (dev_minor >= MAX_WRITERS)
- return NULL;
-
- dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
- return pkt_devs[dev_minor];
-}
-
-static int pkt_open(struct gendisk *disk, blk_mode_t mode)
-{
- struct pktcdvd_device *pd = NULL;
- int ret;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd = pkt_find_dev_from_minor(disk->first_minor);
- if (!pd) {
- ret = -ENODEV;
- goto out;
- }
- BUG_ON(pd->refcnt < 0);
-
- pd->refcnt++;
- if (pd->refcnt > 1) {
- if ((mode & BLK_OPEN_WRITE) &&
- !test_bit(PACKET_WRITABLE, &pd->flags)) {
- ret = -EBUSY;
- goto out_dec;
- }
- } else {
- ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
- if (ret)
- goto out_dec;
- /*
- * needed here as well, since ext2 (among others) may change
- * the blocksize at mount time
- */
- set_blocksize(disk->part0, CD_FRAMESIZE);
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return 0;
-
-out_dec:
- pd->refcnt--;
-out:
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return ret;
-}
-
-static void pkt_release(struct gendisk *disk)
-{
- struct pktcdvd_device *pd = disk->private_data;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd->refcnt--;
- BUG_ON(pd->refcnt < 0);
- if (pd->refcnt == 0) {
- int flush = test_bit(PACKET_WRITABLE, &pd->flags);
- pkt_release_dev(pd, flush);
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
-}
-
-
-static void pkt_end_io_read_cloned(struct bio *bio)
-{
- struct packet_stacked_data *psd = bio->bi_private;
- struct pktcdvd_device *pd = psd->pd;
-
- psd->bio->bi_status = bio->bi_status;
- bio_put(bio);
- bio_endio(psd->bio);
- mempool_free(psd, &psd_pool);
- pkt_bio_finished(pd);
-}
-
-static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
-{
- struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
- GFP_NOIO, &pkt_bio_set);
- struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
-}
-
-static void pkt_make_request_write(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- sector_t zone;
- struct packet_data *pkt;
- int was_empty, blocked_bio;
- struct pkt_rb_node *node;
-
- zone = get_zone(bio->bi_iter.bi_sector, pd);
-
- /*
- * If we find a matching packet in state WAITING or READ_WAIT, we can
- * just append this bio to that packet.
- */
- spin_lock(&pd->cdrw.active_list_lock);
- blocked_bio = 0;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sector == zone) {
- spin_lock(&pkt->lock);
- if ((pkt->state == PACKET_WAITING_STATE) ||
- (pkt->state == PACKET_READ_WAIT_STATE)) {
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size +=
- bio->bi_iter.bi_size / CD_FRAMESIZE;
- if ((pkt->write_size >= pkt->frames) &&
- (pkt->state == PACKET_WAITING_STATE)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- spin_unlock(&pkt->lock);
- spin_unlock(&pd->cdrw.active_list_lock);
- return;
- } else {
- blocked_bio = 1;
- }
- spin_unlock(&pkt->lock);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-
-	/*
-	 * Test if there is enough room left in the bio work queue
-	 * (queue size >= congestion-on mark).
-	 * If not, wait until the queue size drops below the
-	 * congestion-off mark.
-	 */
- spin_lock(&pd->lock);
- if (pd->write_congestion_on > 0
- && pd->bio_queue_size >= pd->write_congestion_on) {
- struct wait_bit_queue_entry wqe;
-
- init_wait_var_entry(&wqe, &pd->congested, 0);
- for (;;) {
- prepare_to_wait_event(__var_waitqueue(&pd->congested),
- &wqe.wq_entry,
- TASK_UNINTERRUPTIBLE);
- if (pd->bio_queue_size <= pd->write_congestion_off)
- break;
- pd->congested = true;
- spin_unlock(&pd->lock);
- schedule();
- spin_lock(&pd->lock);
- }
- }
- spin_unlock(&pd->lock);
-
- /*
- * No matching packet found. Store the bio in the work queue.
- */
- node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
- node->bio = bio;
- spin_lock(&pd->lock);
- BUG_ON(pd->bio_queue_size < 0);
- was_empty = (pd->bio_queue_size == 0);
- pkt_rbtree_insert(pd, node);
- spin_unlock(&pd->lock);
-
- /*
- * Wake up the worker thread.
- */
- atomic_set(&pd->scan_queue, 1);
- if (was_empty) {
- /* This wake_up is required for correct operation */
- wake_up(&pd->wqueue);
- } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
- /*
- * This wake up is not required for correct operation,
- * but improves performance in some cases.
- */
- wake_up(&pd->wqueue);
- }
-}
-
-static void pkt_submit_bio(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- struct bio *split;
-
- bio = bio_split_to_limits(bio);
- if (!bio)
- return;
-
- dev_dbg(ddev, "start = %6llx stop = %6llx\n",
- bio->bi_iter.bi_sector, bio_end_sector(bio));
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- pkt_make_request_read(pd, bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
- dev_err(ddev, "wrong bio size\n");
- goto end_io;
- }
-
- do {
- sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
- sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-
- if (last_zone != zone) {
- BUG_ON(last_zone != zone + pd->settings.size);
-
- split = bio_split(bio, last_zone -
- bio->bi_iter.bi_sector,
- GFP_NOIO, &pkt_bio_set);
- bio_chain(split, bio);
- } else {
- split = bio;
- }
-
- pkt_make_request_write(split);
- } while (split != bio);
-
- return;
-end_io:
- bio_io_error(bio);
-}
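
get_zone() is defined earlier in the driver; conceptually it rounds a sector down to the start of its packet zone. A minimal editorial sketch, assuming a power-of-two zone size in sectors (the same assumption behind the offset mask in pkt_probe_settings()) and ignoring pd->offset:

/* Sketch only, not the driver's definition. */
static inline sector_t pkt_zone_start(sector_t sector, sector_t zone_sectors)
{
	return sector & ~(zone_sectors - 1);
}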
-
-static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int i;
- struct file *bdev_file;
- struct scsi_device *sdev;
-
- if (pd->pkt_dev == dev) {
- dev_err(ddev, "recursive setup not allowed\n");
- return -EBUSY;
- }
- for (i = 0; i < MAX_WRITERS; i++) {
- struct pktcdvd_device *pd2 = pkt_devs[i];
- if (!pd2)
- continue;
- if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
- dev_err(ddev, "%pg already setup\n",
- file_bdev(pd2->bdev_file));
- return -EBUSY;
- }
- if (pd2->pkt_dev == dev) {
- dev_err(ddev, "can't chain pktcdvd devices\n");
- return -EBUSY;
- }
- }
-
- bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
- NULL, NULL);
- if (IS_ERR(bdev_file))
- return PTR_ERR(bdev_file);
- sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
- if (!sdev) {
- fput(bdev_file);
- return -EINVAL;
- }
- put_device(&sdev->sdev_gendev);
-
- /* This is safe, since we have a reference from open(). */
- __module_get(THIS_MODULE);
-
- pd->bdev_file = bdev_file;
- set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE);
-
- atomic_set(&pd->cdrw.pending_bios, 0);
- pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
- if (IS_ERR(pd->cdrw.thread)) {
- dev_err(ddev, "can't start kernel thread\n");
- goto out_mem;
- }
-
- proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
- return 0;
-
-out_mem:
- fput(bdev_file);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return -ENOMEM;
-}
-
-static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct pktcdvd_device *pd = bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
-
- dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
-
- mutex_lock(&pktcdvd_mutex);
- switch (cmd) {
- case CDROMEJECT:
- /*
- * The door gets locked when the device is opened, so we
- * have to unlock it or else the eject command fails.
- */
- if (pd->refcnt == 1)
- pkt_lock_door(pd, 0);
- fallthrough;
- /*
- * forward selected CDROM ioctls to CD-ROM, for UDF
- */
- case CDROMMULTISESSION:
- case CDROMREADTOCENTRY:
- case CDROM_LAST_WRITTEN:
- case CDROM_SEND_PACKET:
- case SCSI_IOCTL_SEND_COMMAND:
- if (!bdev->bd_disk->fops->ioctl)
- ret = -ENOTTY;
- else
- ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
- break;
- default:
- dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
- ret = -ENOTTY;
- }
- mutex_unlock(&pktcdvd_mutex);
-
- return ret;
-}
-
-static unsigned int pkt_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct pktcdvd_device *pd = disk->private_data;
- struct gendisk *attached_disk;
-
- if (!pd)
- return 0;
- if (!pd->bdev_file)
- return 0;
- attached_disk = file_bdev(pd->bdev_file)->bd_disk;
- if (!attached_disk || !attached_disk->fops->check_events)
- return 0;
- return attached_disk->fops->check_events(attached_disk, clearing);
-}
-
-static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
-}
-
-static const struct block_device_operations pktcdvd_ops = {
- .owner = THIS_MODULE,
- .submit_bio = pkt_submit_bio,
- .open = pkt_open,
- .release = pkt_release,
- .ioctl = pkt_ioctl,
- .compat_ioctl = blkdev_compat_ptr_ioctl,
- .check_events = pkt_check_events,
- .devnode = pkt_devnode,
-};
-
-/*
- * Set up mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
-{
- struct queue_limits lim = {
- .max_hw_sectors = PACKET_MAX_SECTORS,
- .logical_block_size = CD_FRAMESIZE,
- };
- int idx;
- int ret = -ENOMEM;
- struct pktcdvd_device *pd;
- struct gendisk *disk;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++)
- if (!pkt_devs[idx])
- break;
- if (idx == MAX_WRITERS) {
- pr_err("max %d writers supported\n", MAX_WRITERS);
- ret = -EBUSY;
- goto out_mutex;
- }
-
- pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
- if (!pd)
- goto out_mutex;
-
- ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
- sizeof(struct pkt_rb_node));
- if (ret)
- goto out_mem;
-
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
- INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
- spin_lock_init(&pd->cdrw.active_list_lock);
-
- spin_lock_init(&pd->lock);
- spin_lock_init(&pd->iosched.lock);
- bio_list_init(&pd->iosched.read_queue);
- bio_list_init(&pd->iosched.write_queue);
- init_waitqueue_head(&pd->wqueue);
- pd->bio_queue = RB_ROOT;
-
- pd->write_congestion_on = write_congestion_on;
- pd->write_congestion_off = write_congestion_off;
-
- disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_mem;
- }
- pd->disk = disk;
- disk->major = pktdev_major;
- disk->first_minor = idx;
- disk->minors = 1;
- disk->fops = &pktcdvd_ops;
- disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
- snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
- disk->private_data = pd;
-
- pd->pkt_dev = MKDEV(pktdev_major, idx);
- ret = pkt_new_dev(pd, dev);
- if (ret)
- goto out_mem2;
-
- /* inherit events of the host device */
- disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
-
- ret = add_disk(disk);
- if (ret)
- goto out_mem2;
-
- pkt_sysfs_dev_new(pd);
- pkt_debugfs_dev_new(pd);
-
- pkt_devs[idx] = pd;
- if (pkt_dev)
- *pkt_dev = pd->pkt_dev;
-
- mutex_unlock(&ctl_mutex);
- return 0;
-
-out_mem2:
- put_disk(disk);
-out_mem:
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-out_mutex:
- mutex_unlock(&ctl_mutex);
- pr_err("setup of pktcdvd device failed\n");
- return ret;
-}
-
-/*
- * Tear down mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_remove_dev(dev_t pkt_dev)
-{
- struct pktcdvd_device *pd;
- struct device *ddev;
- int idx;
- int ret = 0;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- pd = pkt_devs[idx];
- if (pd && (pd->pkt_dev == pkt_dev))
- break;
- }
- if (idx == MAX_WRITERS) {
- pr_debug("dev not setup\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (pd->refcnt > 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ddev = disk_to_dev(pd->disk);
-
- if (!IS_ERR(pd->cdrw.thread))
- kthread_stop(pd->cdrw.thread);
-
- pkt_devs[idx] = NULL;
-
- pkt_debugfs_dev_remove(pd);
- pkt_sysfs_dev_remove(pd);
-
- fput(pd->bdev_file);
-
- remove_proc_entry(pd->disk->disk_name, pkt_proc);
- dev_notice(ddev, "writer unmapped\n");
-
- del_gendisk(pd->disk);
- put_disk(pd->disk);
-
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
-
-out:
- mutex_unlock(&ctl_mutex);
- return ret;
-}
-
-static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
-{
- struct pktcdvd_device *pd;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
- if (pd) {
- ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
- ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
- } else {
- ctrl_cmd->dev = 0;
- ctrl_cmd->pkt_dev = 0;
- }
- ctrl_cmd->num_devices = MAX_WRITERS;
-
- mutex_unlock(&ctl_mutex);
-}
-
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct pkt_ctrl_command ctrl_cmd;
- int ret = 0;
- dev_t pkt_dev = 0;
-
- if (cmd != PACKET_CTRL_CMD)
- return -ENOTTY;
-
- if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
-
- switch (ctrl_cmd.command) {
- case PKT_CTRL_CMD_SETUP:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
- ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
- break;
- case PKT_CTRL_CMD_TEARDOWN:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
- break;
- case PKT_CTRL_CMD_STATUS:
- pkt_get_status(&ctrl_cmd);
- break;
- default:
- return -ENOTTY;
- }
-
- if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
- return ret;
-}
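
A hedged userspace sketch (not from the kernel tree) of how a pktsetup-style tool drives this ioctl; it assumes the UAPI definitions from <linux/pktcdvd.h>, that /dev/sr0 is the CD-ROM to wrap, and that st_rdev matches the encoding new_decode_dev() expects:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/pktcdvd.h>

int main(void)
{
	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
	struct stat st;
	int fd = open("/dev/pktcdvd/control", O_RDWR);

	if (fd < 0 || stat("/dev/sr0", &st) < 0)
		return 1;
	c.dev = st.st_rdev;		/* device-number encoding assumed */
	if (ioctl(fd, PACKET_CTRL_CMD, &c) == 0)
		printf("mapped as pkt_dev %#x\n", c.pkt_dev);
	return 0;
}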
-
-#ifdef CONFIG_COMPAT
-static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations pkt_ctl_fops = {
- .open = nonseekable_open,
- .unlocked_ioctl = pkt_ctl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pkt_ctl_compat_ioctl,
-#endif
- .owner = THIS_MODULE,
- .llseek = no_llseek,
-};
-
-static struct miscdevice pkt_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = DRIVER_NAME,
- .nodename = "pktcdvd/control",
- .fops = &pkt_ctl_fops
-};
-
-static int __init pkt_init(void)
-{
- int ret;
-
- mutex_init(&ctl_mutex);
-
- ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
- sizeof(struct packet_stacked_data));
- if (ret)
- return ret;
- ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
- if (ret) {
- mempool_exit(&psd_pool);
- return ret;
- }
-
- ret = register_blkdev(pktdev_major, DRIVER_NAME);
- if (ret < 0) {
- pr_err("unable to register block device\n");
- goto out2;
- }
- if (!pktdev_major)
- pktdev_major = ret;
-
- ret = pkt_sysfs_init();
- if (ret)
- goto out;
-
- pkt_debugfs_init();
-
- ret = misc_register(&pkt_misc);
- if (ret) {
- pr_err("unable to register misc device\n");
- goto out_misc;
- }
-
- pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
-
- return 0;
-
-out_misc:
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-out:
- unregister_blkdev(pktdev_major, DRIVER_NAME);
-out2:
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
- return ret;
-}
-
-static void __exit pkt_exit(void)
-{
- remove_proc_entry("driver/"DRIVER_NAME, NULL);
- misc_deregister(&pkt_misc);
-
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-
- unregister_blkdev(pktdev_major, DRIVER_NAME);
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
-}
-
-MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
-MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
-MODULE_LICENSE("GPL");
-
-module_init(pkt_init);
-module_exit(pkt_exit);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index b810ac0a5c4b..8892f218a814 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -85,10 +85,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %u sectors from %llu\n",
+ __func__, __LINE__, bio_sectors(iter.bio),
+ iter.bio->bi_iter.bi_sector);
if (gather)
memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
else
memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
+ offset += bvec.bv_len;
}
}
@@ -384,13 +388,13 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
unsigned int devidx;
struct queue_limits lim = {
.logical_block_size = dev->blk_size,
- .max_hw_sectors = dev->bounce_size >> 9,
+ .max_hw_sectors = BOUNCE_SIZE >> 9,
.max_segments = -1,
- .max_segment_size = dev->bounce_size,
+ .max_segment_size = BOUNCE_SIZE,
.dma_alignment = dev->blk_size - 1,
+ .features = BLK_FEAT_WRITE_CACHE |
+ BLK_FEAT_ROTATIONAL,
};
-
- struct request_queue *queue;
struct gendisk *gendisk;
if (dev->blk_size < 512) {
@@ -434,8 +438,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
if (error)
goto fail_teardown;
@@ -447,10 +450,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
goto fail_free_tag_set;
}
- queue = gendisk->queue;
-
- blk_queue_write_cache(queue, true, false);
-
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 26ff5cd2bf0a..af0e21149dbc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -362,7 +362,7 @@ enum rbd_watch_state {
enum rbd_lock_state {
RBD_LOCK_STATE_UNLOCKED,
RBD_LOCK_STATE_LOCKED,
- RBD_LOCK_STATE_RELEASING,
+ RBD_LOCK_STATE_QUIESCING,
};
/* WatchNotify::ClientId */
@@ -422,7 +422,7 @@ struct rbd_device {
struct list_head running_list;
struct completion acquire_wait;
int acquire_err;
- struct completion releasing_wait;
+ struct completion quiescing_wait;
spinlock_t object_map_lock;
u8 *object_map;
@@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
lockdep_assert_held(&rbd_dev->lock_rwsem);
return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
- rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
+ rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING;
}
static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
@@ -3457,13 +3457,14 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req)
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
if (!list_empty(&img_req->lock_item)) {
+ rbd_assert(!list_empty(&rbd_dev->running_list));
list_del_init(&img_req->lock_item);
- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
list_empty(&rbd_dev->running_list));
}
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
- complete(&rbd_dev->releasing_wait);
+ complete(&rbd_dev->quiescing_wait);
}
static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
@@ -3476,11 +3477,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
if (rbd_lock_add_request(img_req))
return 1;
- if (rbd_dev->opts->exclusive) {
- WARN_ON(1); /* lock got released? */
- return -EROFS;
- }
-
/*
* Note the use of mod_delayed_work() in rbd_acquire_lock()
* and cancel_delayed_work() in wake_lock_waiters().
@@ -4181,16 +4177,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
/*
* Ensure that all in-flight IO is flushed.
*/
- rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
- rbd_assert(!completion_done(&rbd_dev->releasing_wait));
+ rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING;
+ rbd_assert(!completion_done(&rbd_dev->quiescing_wait));
if (list_empty(&rbd_dev->running_list))
return true;
up_write(&rbd_dev->lock_rwsem);
- wait_for_completion(&rbd_dev->releasing_wait);
+ wait_for_completion(&rbd_dev->quiescing_wait);
down_write(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING)
return false;
rbd_assert(list_empty(&rbd_dev->running_list));
@@ -4601,6 +4597,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
rbd_warn(rbd_dev, "failed to update lock cookie: %d",
ret);
+ if (rbd_dev->opts->exclusive)
+ rbd_warn(rbd_dev,
+ "temporarily releasing lock on exclusive mapping");
+
/*
* Lock cookie cannot be updated on older OSDs, so do
* a manual release and queue an acquire.
@@ -4949,14 +4949,12 @@ static const struct blk_mq_ops rbd_mq_ops = {
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
- struct request_queue *q;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
struct queue_limits lim = {
.max_hw_sectors = objset_bytes >> SECTOR_SHIFT,
- .max_user_sectors = objset_bytes >> SECTOR_SHIFT,
+ .io_opt = objset_bytes,
.io_min = rbd_dev->opts->alloc_size,
- .io_opt = rbd_dev->opts->alloc_size,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
};
@@ -4966,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
@@ -4980,12 +4977,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
}
+ if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
+ lim.features |= BLK_FEAT_STABLE_WRITES;
+
disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_tag_set;
}
- q = disk->queue;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
@@ -4997,13 +4996,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->minors = RBD_MINORS_PER_MAJOR;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
-
- if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
-
rbd_dev->disk = disk;
return 0;
@@ -5383,7 +5375,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
INIT_LIST_HEAD(&rbd_dev->acquiring_list);
INIT_LIST_HEAD(&rbd_dev->running_list);
init_completion(&rbd_dev->acquire_wait);
- init_completion(&rbd_dev->releasing_wait);
+ init_completion(&rbd_dev->quiescing_wait);
spin_lock_init(&rbd_dev->object_map_lock);
@@ -6589,11 +6581,6 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
if (ret)
return ret;
- /*
- * The lock may have been released by now, unless automatic lock
- * transitions are disabled.
- */
- rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
return 0;
}
@@ -7294,8 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
* Prevent new IO from being queued and wait for existing
* IO to complete/fail.
*/
- blk_mq_freeze_queue(rbd_dev->disk->queue);
+ unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
blk_mark_disk_dead(rbd_dev->disk);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
}
del_gendisk(rbd_dev->disk);
@@ -7400,7 +7389,7 @@ static int __init rbd_init(void)
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
- rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 39887556cf95..6ea7c12e3a87 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -475,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
}
}
-static struct kobj_type rnbd_dev_ktype = {
+static const struct kobj_type rnbd_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = rnbd_dev_groups,
};
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b7ffe03c6160..f1409e54010a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -942,11 +942,11 @@ static void rnbd_client_release(struct gendisk *gen)
rnbd_clt_put_dev(dev);
}
-static int rnbd_client_getgeo(struct block_device *block_device,
+static int rnbd_client_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
* See queue limits.
*/
if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
- sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
+ sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
if (sg_cnt == 0)
sg_mark_end(&iu->sgt.sgl[0]);
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_QUEUE_SHARED;
+ tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
@@ -1352,10 +1351,6 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
- /*
- * Network device does not need rotational
- */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
put_disk(dev->gd);
@@ -1389,18 +1384,18 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
le32_to_cpu(rsp->max_discard_sectors);
}
+ if (rsp->cache_policy & RNBD_WRITEBACK) {
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (rsp->cache_policy & RNBD_FUA)
+ lim.features |= BLK_FEAT_FUA;
+ }
+
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
if (IS_ERR(dev->gd))
return PTR_ERR(dev->gd);
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
- blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_write_cache(dev->queue,
- !!(rsp->cache_policy & RNBD_WRITEBACK),
- !!(rsp->cache_policy & RNBD_FUA));
-
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
@@ -1814,7 +1809,7 @@ static int __init rnbd_client_init(void)
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
- rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index f35be51d213c..77360c2a6069 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -24,7 +24,7 @@
#define RTRS_PORT 1234
/**
- * enum rnbd_msg_types - RNBD message types
+ * enum rnbd_msg_type - RNBD message types
* @RNBD_MSG_SESS_INFO: initial session info from client to server
* @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
* @RNBD_MSG_OPEN: open (map) device request
@@ -47,10 +47,11 @@ enum rnbd_msg_type {
*/
struct rnbd_msg_hdr {
__le16 type;
+ /* private: */
__le16 __padding;
};
-/**
+/*
 * We allow mapping RO many times and RW only once. One more RW mapping is
 * allowed if MIGRATION is provided (a second RW export can be required,
 * for example, for VM migration).
@@ -78,6 +79,7 @@ static const __maybe_unused struct {
struct rnbd_msg_sess_info {
struct rnbd_msg_hdr hdr;
u8 ver;
+ /* private: */
u8 reserved[31];
};
@@ -89,6 +91,7 @@ struct rnbd_msg_sess_info {
struct rnbd_msg_sess_info_rsp {
struct rnbd_msg_hdr hdr;
u8 ver;
+ /* private: */
u8 reserved[31];
};
@@ -97,13 +100,16 @@ struct rnbd_msg_sess_info_rsp {
* @hdr: message header
* @access_mode: the mode to open remote device, valid values see:
* enum rnbd_access_mode
- * @device_name: device path on remote side
+ * @dev_name: device path on remote side
*/
struct rnbd_msg_open {
struct rnbd_msg_hdr hdr;
u8 access_mode;
+ /* private: */
u8 resv1;
+ /* public: */
s8 dev_name[NAME_MAX];
+ /* private: */
u8 reserved[3];
};
@@ -155,6 +161,7 @@ struct rnbd_msg_open_rsp {
__le16 secure_discard;
u8 obsolete_rotational;
u8 cache_policy;
+ /* private: */
u8 reserved[10];
};
@@ -187,7 +194,7 @@ struct rnbd_msg_io {
* @RNBD_OP_DISCARD: discard sectors
* @RNBD_OP_SECURE_ERASE: securely erase sectors
* @RNBD_OP_WRITE_ZEROES: write zeroes sectors
-
+ *
* @RNBD_F_SYNC: request is sync (sync write or read)
* @RNBD_F_FUA: forced unit access
*/
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index cba6ba43c2c2..64780094442c 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -33,7 +33,7 @@ static void rnbd_srv_dev_release(struct kobject *kobj)
kfree(dev);
}
-static struct kobj_type dev_ktype = {
+static const struct kobj_type dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rnbd_srv_dev_release
};
@@ -184,7 +184,7 @@ static void rnbd_srv_sess_dev_release(struct kobject *kobj)
rnbd_destroy_sess_dev(sess_dev, sess_dev->keep_id);
}
-static struct kobj_type rnbd_srv_sess_dev_ktype = {
+static const struct kobj_type rnbd_srv_sess_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rnbd_srv_sess_dev_release,
};
diff --git a/drivers/block/rnbd/rnbd-srv-trace.h b/drivers/block/rnbd/rnbd-srv-trace.h
index 8dedf73bdd28..89d0bcb17195 100644
--- a/drivers/block/rnbd/rnbd-srv-trace.h
+++ b/drivers/block/rnbd/rnbd-srv-trace.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(rnbd_srv_link_class,
TP_fast_assign(
__entry->qdepth = srv->queue_depth;
- __assign_str(sessname, srv->sessname);
+ __assign_str(sessname);
),
TP_printk("sessname: %s qdepth: %d",
@@ -85,7 +85,7 @@ TRACE_EVENT(process_rdma,
),
TP_fast_assign(
- __assign_str(sessname, srv->sessname);
+ __assign_str(sessname);
__entry->dir = id->dir;
__entry->ver = srv->ver;
__entry->device_id = le32_to_cpu(msg->device_id);
@@ -130,7 +130,7 @@ TRACE_EVENT(process_msg_sess_info,
__entry->proto_ver = srv->ver;
__entry->clt_ver = msg->ver;
__entry->srv_ver = RNBD_PROTO_VER_MAJOR;
- __assign_str(sessname, srv->sessname);
+ __assign_str(sessname);
),
TP_printk("Session %s using proto-ver %d (clt-ver: %d, srv-ver: %d)",
@@ -165,8 +165,8 @@ TRACE_EVENT(process_msg_open,
TP_fast_assign(
__entry->access_mode = msg->access_mode;
- __assign_str(sessname, srv->sessname);
- __assign_str(dev_name, msg->dev_name);
+ __assign_str(sessname);
+ __assign_str(dev_name);
),
TP_printk("Open message received: session='%s' path='%s' access_mode=%s",
@@ -189,7 +189,7 @@ TRACE_EVENT(process_msg_close,
TP_fast_assign(
__entry->device_id = le32_to_cpu(msg->device_id);
- __assign_str(sessname, srv->sessname);
+ __assign_str(sessname);
),
TP_printk("Close message received: session='%s' device id='%d'",
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index f6e3a3c4b76c..2df8941a6b14 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -147,20 +147,22 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
- if (bio_add_page(bio, virt_to_page(data), datalen,
- offset_in_page(data)) != datalen) {
- rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
+ bio_add_virt_nofail(bio, data, datalen);
+
+ bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
+ if (bio_has_data(bio) &&
+ bio->bi_iter.bi_size != le32_to_cpu(msg->bi_size)) {
+ rnbd_srv_err_rl(sess_dev, "Datalen mismatch: bio bi_size (%u), bi_size (%u)\n",
+ bio->bi_iter.bi_size, msg->bi_size);
err = -EINVAL;
goto bio_put;
}
-
bio->bi_end_io = rnbd_dev_bi_end_io;
bio->bi_private = priv;
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
- bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
- bio_set_prio(bio, prio);
+ bio->bi_ioprio = prio;
submit_bio(bio);
diff --git a/drivers/block/rnull/Kconfig b/drivers/block/rnull/Kconfig
new file mode 100644
index 000000000000..7bc5b376c128
--- /dev/null
+++ b/drivers/block/rnull/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Rust null block device driver configuration
+
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST && CONFIGFS_FS
+ help
+ This is the Rust implementation of the null block driver. Like
+	  the C version, the driver allows the user to create virtual block
+ devices that can be configured via various configuration options.
+
+ If unsure, say N.
diff --git a/drivers/block/rnull/Makefile b/drivers/block/rnull/Makefile
new file mode 100644
index 000000000000..11cfa5e615dc
--- /dev/null
+++ b/drivers/block/rnull/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
new file mode 100644
index 000000000000..6713a6d92391
--- /dev/null
+++ b/drivers/block/rnull/configfs.rs
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{NullBlkDevice, THIS_MODULE};
+use kernel::{
+ block::mq::gen_disk::{GenDisk, GenDiskBuilder},
+ c_str,
+ configfs::{self, AttributeOperations},
+ configfs_attrs,
+ fmt::{self, Write as _},
+ new_mutex,
+ page::PAGE_SIZE,
+ prelude::*,
+ str::{kstrtobool_bytes, CString},
+ sync::Mutex,
+};
+use pin_init::PinInit;
+
+pub(crate) fn subsystem() -> impl PinInit<kernel::configfs::Subsystem<Config>, Error> {
+ let item_type = configfs_attrs! {
+ container: configfs::Subsystem<Config>,
+ data: Config,
+ child: DeviceConfig,
+ attributes: [
+ features: 0,
+ ],
+ };
+
+ kernel::configfs::Subsystem::new(c_str!("rnull"), item_type, try_pin_init!(Config {}))
+}
+
+#[pin_data]
+pub(crate) struct Config {}
+
+#[vtable]
+impl AttributeOperations<0> for Config {
+ type Data = Config;
+
+ fn show(_this: &Config, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_str("blocksize,size,rotational,irqmode\n")?;
+ Ok(writer.bytes_written())
+ }
+}
+
+#[vtable]
+impl configfs::GroupOperations for Config {
+ type Child = DeviceConfig;
+
+ fn make_group(
+ &self,
+ name: &CStr,
+ ) -> Result<impl PinInit<configfs::Group<DeviceConfig>, Error>> {
+ let item_type = configfs_attrs! {
+ container: configfs::Group<DeviceConfig>,
+ data: DeviceConfig,
+ attributes: [
+ // Named for compatibility with C null_blk
+ power: 0,
+ blocksize: 1,
+ rotational: 2,
+ size: 3,
+ irqmode: 4,
+ ],
+ };
+
+ Ok(configfs::Group::new(
+ name.try_into()?,
+ item_type,
+ // TODO: cannot coerce new_mutex!() to impl PinInit<_, Error>, so put mutex inside
+ try_pin_init!( DeviceConfig {
+ data <- new_mutex!(DeviceConfigInner {
+ powered: false,
+ block_size: 4096,
+ rotational: false,
+ disk: None,
+ capacity_mib: 4096,
+ irq_mode: IRQMode::None,
+ name: name.try_into()?,
+ }),
+ }),
+ ))
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum IRQMode {
+ None,
+ Soft,
+}
+
+impl TryFrom<u8> for IRQMode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0 => Ok(Self::None),
+ 1 => Ok(Self::Soft),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl fmt::Display for IRQMode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::None => f.write_str("0")?,
+ Self::Soft => f.write_str("1")?,
+ }
+ Ok(())
+ }
+}
+
+#[pin_data]
+pub(crate) struct DeviceConfig {
+ #[pin]
+ data: Mutex<DeviceConfigInner>,
+}
+
+#[pin_data]
+struct DeviceConfigInner {
+ powered: bool,
+ name: CString,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ disk: Option<GenDisk<NullBlkDevice>>,
+}
+
+#[vtable]
+impl configfs::AttributeOperations<0> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().powered {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ let power_op = kstrtobool_bytes(page)?;
+ let mut guard = this.data.lock();
+
+ if !guard.powered && power_op {
+ guard.disk = Some(NullBlkDevice::new(
+ &guard.name,
+ guard.block_size,
+ guard.rotational,
+ guard.capacity_mib,
+ guard.irq_mode,
+ )?);
+ guard.powered = true;
+ } else if guard.powered && !power_op {
+ drop(guard.disk.take());
+ guard.powered = false;
+ }
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<1> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().block_size))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u32>().map_err(|_| EINVAL)?;
+
+ GenDiskBuilder::validate_block_size(value)?;
+ this.data.lock().block_size = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<2> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().rotational {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ this.data.lock().rotational = kstrtobool_bytes(page)?;
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<3> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().capacity_mib))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u64>().map_err(|_| EINVAL)?;
+
+ this.data.lock().capacity_mib = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<4> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().irq_mode))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u8>().map_err(|_| EINVAL)?;
+
+ this.data.lock().irq_mode = IRQMode::try_from(value)?;
+ Ok(())
+ }
+}
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
new file mode 100644
index 000000000000..a9d5e575a2c4
--- /dev/null
+++ b/drivers/block/rnull/rnull.rs
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+
+mod configfs;
+
+use configfs::IRQMode;
+use kernel::{
+ block::{
+ self,
+ mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ },
+ error::Result,
+ pr_info,
+ prelude::*,
+ sync::{aref::ARef, Arc},
+};
+use pin_init::PinInit;
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ authors: ["Andreas Hindborg"],
+ description: "Rust implementation of the C null block driver",
+ license: "GPL v2",
+}
+
+#[pin_data]
+struct NullBlkModule {
+ #[pin]
+ configfs_subsystem: kernel::configfs::Subsystem<configfs::Config>,
+}
+
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Rust null_blk loaded\n");
+
+ try_pin_init!(Self {
+ configfs_subsystem <- configfs::subsystem(),
+ })
+ }
+}
+
+struct NullBlkDevice;
+
+impl NullBlkDevice {
+ fn new(
+ name: &CStr,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ ) -> Result<GenDisk<Self>> {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), GFP_KERNEL)?;
+
+ let queue_data = Box::new(QueueData { irq_mode }, GFP_KERNEL)?;
+
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(capacity_mib << (20 - block::SECTOR_SHIFT))
+ .logical_block_size(block_size)?
+ .physical_block_size(block_size)?
+ .rotational(rotational)
+ .build(fmt!("{}", name.to_str()?), tagset, queue_data)
+ }
+}
+
+struct QueueData {
+ irq_mode: IRQMode,
+}
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ type QueueData = KBox<QueueData>;
+
+ #[inline(always)]
+ fn queue_rq(queue_data: &QueueData, rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ match queue_data.irq_mode {
+ IRQMode::None => mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request"),
+ IRQMode::Soft => mq::Request::complete(rq),
+ }
+ Ok(())
+ }
+
+ fn commit_rqs(_queue_data: &QueueData) {}
+
+ fn complete(rq: ARef<mq::Request<Self>>) {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+ }
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5286cb8e0824..db1fe9772a4d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -119,9 +119,8 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
-static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int vdc_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
@@ -485,7 +484,7 @@ static int __send_request(struct request *req)
}
sg_init_table(sg, port->ring_cookies);
- nsg = blk_rq_map_sg(req->q, req, sg);
+ nsg = blk_rq_map_sg(req, sg);
len = 0;
for (i = 0; i < nsg; i++)
@@ -791,6 +790,7 @@ static int probe_disk(struct vdc_port *port)
.seg_boundary_mask = PAGE_SIZE - 1,
.max_segment_size = PAGE_SIZE,
.max_segments = port->ring_cookies,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct request_queue *q;
struct gendisk *g;
@@ -828,7 +828,7 @@ static int probe_disk(struct vdc_port *port)
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
- VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ VDC_TX_RING_SIZE, 0);
if (err)
return err;
@@ -917,12 +917,12 @@ struct vdc_check_port_data {
char *type;
};
-static int vdc_device_probed(struct device *dev, void *arg)
+static int vdc_device_probed(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
- struct vdc_check_port_data *port_data;
+ const struct vdc_check_port_data *port_data;
- port_data = (struct vdc_check_port_data *)arg;
+ port_data = (const struct vdc_check_port_data *)arg;
if ((vdev->dev_no == port_data->dev_no) &&
(!(strcmp((char *)&vdev->type, port_data->type))) &&
@@ -956,8 +956,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
dev = device_find_child(vdev->dev.parent, &port_data,
vdc_device_probed);
- if (dev)
+ if (dev) {
+ put_device(dev);
return true;
+ }
return false;
}
@@ -1069,7 +1071,7 @@ static void vdc_port_remove(struct vio_dev *vdev)
flush_work(&port->ldc_reset_work);
cancel_delayed_work_sync(&port->ldc_reset_timer_work);
- del_timer_sync(&port->vio.timer);
+ timer_delete_sync(&port->vio.timer);
del_gendisk(port->disk);
put_disk(port->disk);
@@ -1112,6 +1114,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
static void vdc_queue_drain(struct vdc_port *port)
{
struct request_queue *q = port->disk->queue;
+ unsigned int memflags;
/*
* Mark the queue as draining, then freeze/quiesce to ensure
@@ -1120,13 +1123,13 @@ static void vdc_queue_drain(struct vdc_port *port)
port->drain = 1;
spin_unlock_irq(&port->vio.lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&port->vio.lock);
port->drain = 0;
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
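
As the hunk above shows, blk_mq_freeze_queue() now returns memalloc flags that must be handed back to blk_mq_unfreeze_queue(). A minimal editorial sketch of the updated pairing, assuming a valid struct request_queue *q:

unsigned int memflags;

memflags = blk_mq_freeze_queue(q);
/* queue frozen: no new requests reach the driver */
blk_mq_unfreeze_queue(q, memflags);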
@@ -1185,7 +1188,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
+ mod_delayed_work(system_percpu_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
@@ -1213,7 +1216,7 @@ static int __init vdc_init(void)
{
int err;
- sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
if (!sunvdc_wq)
return -ENOMEM;
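The vdc_queue_drain() hunk above tracks a block-layer API change: blk_mq_freeze_queue() now returns memalloc flags that must be handed back to blk_mq_unfreeze_queue(). A minimal sketch of the updated pairing (the function name is illustrative):

	static void drain_example(struct request_queue *q)
	{
		unsigned int memflags;

		memflags = blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);
		/* ... fail or requeue anything still inflight ... */
		blk_mq_unquiesce_queue(q);
		blk_mq_unfreeze_queue(q, memflags);
	}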
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 6731678f3a41..416015947ae6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -711,9 +711,9 @@ static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
-static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int floppy_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct floppy_struct *g;
int ret;
@@ -787,6 +787,9 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
static int swim_floppy_init(struct swim_priv *swd)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
int err;
int drive;
struct swim __iomem *base = swd->base;
@@ -815,12 +818,12 @@ static int swim_floppy_init(struct swim_priv *swd)
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
- &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ &swim_mq_ops, 2, 0);
if (err)
goto exit_put_disks;
swd->unit[drive].disk =
- blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
+ blk_mq_alloc_disk(&swd->unit[drive].tag_set, &lim,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
@@ -941,7 +944,7 @@ static void swim_remove(struct platform_device *dev)
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove_new = swim_remove,
+ .remove = swim_remove,
.driver = {
.name = CARDNAME,
},
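The swim hunks also show the queue_limits conversion: rotational media is now declared up front in a queue_limits struct passed to blk_mq_alloc_disk(), rather than flagged on the queue afterwards. A sketch of the pattern (the tag set and private data are illustrative):

	struct queue_limits lim = {
		.features = BLK_FEAT_ROTATIONAL,
	};
	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&tag_set, &lim, driver_private);
	if (IS_ERR(disk))
		return PTR_ERR(disk);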
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index a04756ac778e..01f7aef3fcfb 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -362,7 +362,7 @@ static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(struct timer_list *t))
{
if (fs->timeout_pending)
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
add_timer(&fs->timeout);
@@ -555,7 +555,7 @@ static void act(struct floppy_state *fs)
static void scan_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -579,7 +579,7 @@ static void scan_timeout(struct timer_list *t)
static void seek_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -598,7 +598,7 @@ static void seek_timeout(struct timer_list *t)
static void settle_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -627,7 +627,7 @@ static void settle_timeout(struct timer_list *t)
static void xfer_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
@@ -677,7 +677,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
swim3_err("%s", "Seen sector but cyl=ff?\n");
@@ -706,7 +706,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
if (fs->state == seeking)
++fs->retries;
@@ -716,7 +716,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
break;
case settling:
out_8(&sw->intr_enable, 0);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
act(fs);
break;
@@ -726,7 +726,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- del_timer(&fs->timeout);
+ timer_delete(&fs->timeout);
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
+ unsigned int memflags;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static int fd_eject(struct floppy_state *fs)
@@ -1189,6 +1190,9 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct floppy_state *fs;
struct gendisk *disk;
int rc;
@@ -1205,12 +1209,11 @@ static int swim3_attach(struct macio_dev *mdev,
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
if (rc)
goto out_unregister;
- disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
+ disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;
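The swim3 changes are mechanical renames of the timer API: del_timer() becomes timer_delete(), del_timer_sync() becomes timer_delete_sync(), and from_timer() becomes timer_container_of(). A sketch with an illustrative container struct:

	struct my_state {
		struct timer_list timeout;
	};

	static void my_timeout(struct timer_list *t)
	{
		/* recover the containing object, as from_timer() used to */
		struct my_state *s = timer_container_of(s, t, timeout);

		/* ... handle expiry for s ... */
	}

	static void my_stop(struct my_state *s)
	{
		timer_delete_sync(&s->timeout);
	}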
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 176657dce3e3..2c715df63f23 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -48,6 +48,16 @@
#define UBLK_MINORS (1U << MINORBITS)
+#define UBLK_INVALID_BUF_IDX ((u16)-1)
+
+/* private ioctl command mirrors */
+#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
+#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV)
+
+#define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
+#define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
+
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
| UBLK_F_URING_CMD_COMP_IN_TASK \
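The private mirrors above strip the ioctl direction/size bits with _IOC_NR() so command handlers can switch on the bare command number; roughly:

	u32 cmd_op = cmd->cmd_op;	/* e.g. UBLK_U_CMD_UPDATE_SIZE */

	switch (_IOC_NR(cmd_op)) {
	case UBLK_CMD_UPDATE_SIZE:	/* == _IOC_NR(UBLK_U_CMD_UPDATE_SIZE) */
		/* ... */
		break;
	}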
@@ -57,24 +67,44 @@
| UBLK_F_UNPRIVILEGED_DEV \
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
- | UBLK_F_ZONED)
+ | UBLK_F_ZONED \
+ | UBLK_F_USER_RECOVERY_FAIL_IO \
+ | UBLK_F_UPDATE_SIZE \
+ | UBLK_F_AUTO_BUF_REG \
+ | UBLK_F_QUIESCE \
+ | UBLK_F_PER_IO_DAEMON \
+ | UBLK_F_BUF_REG_OFF_DAEMON)
+
+#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
- UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
-
-struct ublk_rq_data {
- struct llist_node node;
-
- struct kref ref;
- __u64 sector;
- __u32 operation;
- __u32 nr_zones;
-};
+ UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
+ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_uring_cmd_pdu {
+ /*
+	 * Store requests from the same batch temporarily, for queueing
+	 * them to the daemon context.
+	 *
+	 * This could have been stored in the request payload, but we want
+	 * to avoid the extra pre-allocation, and the uring_cmd payload is
+	 * always free for us.
+ */
+ union {
+ struct request *req;
+ struct request *req_list;
+ };
+
+ /*
+	 * The following two are valid for this cmd's whole lifetime, and
+	 * are set up in the ublk uring_cmd handler
+ */
struct ublk_queue *ubq;
+
u16 tag;
};
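As the comment in the struct notes, the uring_cmd pdu doubles as free per-command scratch space. The producer side of that hand-off, using the io_uring_cmd_to_pdu() helper that appears later in this patch (tw_cb is an illustrative callback), looks roughly like:

	struct ublk_uring_cmd_pdu *pdu =
		io_uring_cmd_to_pdu(cmd, struct ublk_uring_cmd_pdu);

	pdu->req = rq;					/* stash the request */
	io_uring_cmd_complete_in_task(cmd, tw_cb);	/* run in task context */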
@@ -99,15 +129,6 @@ struct ublk_uring_cmd_pdu {
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
-/*
* UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
* get data buffer address from ublksrv.
*
@@ -116,45 +137,79 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+/*
+ * The request buffer is registered automatically, so we have to
+ * unregister it before completing this request.
+ *
+ * io_uring will unregister the buffer automatically for us on exit.
+ */
+#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10
+
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
-struct ublk_io {
- /* userspace buffer address from io cmd */
+/*
+ * Initialize refcount to a large number to include any registered buffers.
+ * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for
+ * any buffers registered on the io daemon task.
+ */
+#define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
+
+union ublk_io_buf {
__u64 addr;
+ struct ublk_auto_buf_reg auto_reg;
+};
+
+struct ublk_io {
+ union ublk_io_buf buf;
unsigned int flags;
int res;
- struct io_uring_cmd *cmd;
-};
+ union {
+ /* valid if UBLK_IO_FLAG_ACTIVE is set */
+ struct io_uring_cmd *cmd;
+ /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
+ struct request *req;
+ };
+
+ struct task_struct *task;
+
+ /*
+ * The number of uses of this I/O by the ublk server
+ * if user copy or zero copy are enabled:
+	 * if user copy or zero copy is enabled:
+ * until UBLK_IO_COMMIT_AND_FETCH_REQ
+ * - 1 for each inflight ublk_ch_{read,write}_iter() call
+ * - 1 for each io_uring registered buffer not registered on task
+ * The I/O can only be completed once all references are dropped.
+ * User copy and buffer registration operations are only permitted
+ * if the reference count is nonzero.
+ */
+ refcount_t ref;
+ /* Count of buffers registered on task and not yet unregistered */
+ unsigned task_registered_buffers;
+
+ void *buf_ctx_handle;
+} ____cacheline_aligned_in_smp;
struct ublk_queue {
int q_id;
int q_depth;
unsigned long flags;
- struct task_struct *ubq_daemon;
- char *io_cmd_buf;
+ struct ublksrv_io_desc *io_cmd_buf;
- struct llist_head io_cmds;
-
- unsigned long io_addr; /* mapped vm address */
- unsigned int max_io_sz;
bool force_abort;
- bool timeout;
bool canceling;
- unsigned short nr_io_ready; /* how many ios setup */
+ bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
spinlock_t cancel_lock;
struct ublk_device *dev;
- struct ublk_io ios[];
+ struct ublk_io ios[] __counted_by(q_depth);
};
struct ublk_device {
struct gendisk *ub_disk;
- char *__queues;
-
- unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
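A sketch of the biased-refcount lifecycle documented on io->ref above; complete_io() is hypothetical shorthand for completing the request:

	/* dispatch to the server: install the large bias */
	refcount_set(&io->ref, UBLK_REFCOUNT_INIT);

	/* per-use reference, e.g. one user-copy read/write call */
	if (refcount_inc_not_zero(&io->ref)) {
		/* ... copy data ... */
		if (refcount_dec_and_test(&io->ref))
			complete_io(io);
	}

	/* UBLK_IO_COMMIT_AND_FETCH_REQ: drop the bias, minus any buffers
	 * still registered on the daemon task */
	if (refcount_sub_and_test(UBLK_REFCOUNT_INIT -
				  io->task_registered_buffers, &io->ref))
		complete_io(io);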
@@ -176,11 +231,14 @@ struct ublk_device {
struct ublk_params params;
struct completion completion;
- unsigned int nr_queues_ready;
- unsigned int nr_privileged_daemon;
+ u32 nr_io_ready;
+ bool unprivileged_daemons;
+ struct mutex cancel_mutex;
+ bool canceling;
+ pid_t ublksrv_tgid;
+ struct delayed_work exit_work;
- struct work_struct quiesce_work;
- struct work_struct stop_work;
+ struct ublk_queue *queues[];
};
/* header of ublk_params */
@@ -189,14 +247,17 @@ struct ublk_params_header {
__u32 types;
};
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
-
+static void ublk_io_release(void *priv);
+static void ublk_stop_dev_unlocked(struct ublk_device *ub);
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
+static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag);
-static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
+
+static inline struct ublksrv_io_desc *
+ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
{
- return ub->dev_info.flags & UBLK_F_USER_COPY;
+ return &ubq->io_cmd_buf[tag];
}
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
@@ -204,13 +265,40 @@ static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
return ub->dev_info.flags & UBLK_F_ZONED;
}
-static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
+static inline bool ublk_queue_is_zoned(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_ZONED;
}
#ifdef CONFIG_BLK_DEV_ZONED
+struct ublk_zoned_report_desc {
+ __u64 sector;
+ __u32 operation;
+ __u32 nr_zones;
+};
+
+static DEFINE_XARRAY(ublk_zoned_report_descs);
+
+static int ublk_zoned_insert_report_desc(const struct request *req,
+ struct ublk_zoned_report_desc *desc)
+{
+ return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
+ desc, GFP_KERNEL);
+}
+
+static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
+ const struct request *req)
+{
+ return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
+}
+
+static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
+ const struct request *req)
+{
+ return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
+}
+
static int ublk_get_nr_zones(const struct ublk_device *ub)
{
const struct ublk_param_basic *p = &ub->params.basic;
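The xarray above replaces the old per-request pdu for zone-report bookkeeping, with the request pointer itself serving as the key. The general shape of that pattern, with illustrative names:

	static DEFINE_XARRAY(req_descs);

	static int stash_desc(struct request *req, struct my_desc *d)
	{
		return xa_insert(&req_descs, (unsigned long)req, d, GFP_KERNEL);
	}

	static struct my_desc *peek_desc(struct request *req)
	{
		return xa_load(&req_descs, (unsigned long)req);
	}

	static void drop_desc(struct request *req)
	{
		xa_erase(&req_descs, (unsigned long)req);
	}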
@@ -248,8 +336,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
-
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
@@ -282,7 +368,7 @@ static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
}
static int ublk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct ublk_device *ub = disk->private_data;
unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
@@ -307,7 +393,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int zones_in_request =
min_t(unsigned int, remaining_zones, max_zones_per_request);
struct request *req;
- struct ublk_rq_data *pdu;
+ struct ublk_zoned_report_desc desc;
blk_status_t status;
memset(buffer, 0, buffer_length);
@@ -318,20 +404,22 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
goto out;
}
- pdu = blk_mq_rq_to_pdu(req);
- pdu->operation = UBLK_IO_OP_REPORT_ZONES;
- pdu->sector = sector;
- pdu->nr_zones = zones_in_request;
+ desc.operation = UBLK_IO_OP_REPORT_ZONES;
+ desc.sector = sector;
+ desc.nr_zones = zones_in_request;
+ ret = ublk_zoned_insert_report_desc(req, &desc);
+ if (ret)
+ goto free_req;
- ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
- GFP_KERNEL);
- if (ret) {
- blk_mq_free_request(req);
- goto out;
- }
+ ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
+ if (ret)
+ goto erase_desc;
status = blk_execute_rq(req, 0);
ret = blk_status_to_errno(status);
+erase_desc:
+ ublk_zoned_erase_report_desc(req);
+free_req:
blk_mq_free_request(req);
if (ret)
goto out;
@@ -343,7 +431,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
if (!zone->len)
break;
- ret = cb(zone, i, data);
+ ret = disk_report_zone(disk, zone, i, args);
if (ret)
goto out;
@@ -365,7 +453,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
+ struct ublk_zoned_report_desc *desc;
u32 ublk_op;
switch (req_op(req)) {
@@ -388,12 +476,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
- ublk_op = pdu->operation;
+ desc = ublk_zoned_get_report_desc(req);
+ if (!desc)
+ return BLK_STS_IOERR;
+ ublk_op = desc->operation;
switch (ublk_op) {
case UBLK_IO_OP_REPORT_ZONES:
iod->op_flags = ublk_op | ublk_req_build_flags(req);
- iod->nr_zones = pdu->nr_zones;
- iod->start_sector = pdu->sector;
+ iod->nr_zones = desc->nr_zones;
+ iod->start_sector = desc->sector;
return BLK_STS_OK;
default:
return BLK_STS_IOERR;
@@ -408,7 +499,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
- iod->addr = io->addr;
+ iod->addr = io->buf.addr;
return BLK_STS_OK;
}
@@ -439,8 +530,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
-static inline void __ublk_complete_rq(struct request *req);
-static void ublk_complete_rq(struct kref *ref);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -453,15 +544,17 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+
+#define UBLK_MAX_UBLKS UBLK_MINORS
+
/*
- * Max ublk devices allowed to add
+ * Max unprivileged ublk devices allowed to be added
*
* It can be extended to one per-user limit in future or even controlled
* by cgroup.
*/
-#define UBLK_MAX_UBLKS UBLK_MINORS
-static unsigned int ublks_max = 64;
-static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+static unsigned int unprivileged_ublks_max = 64;
+static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
@@ -484,16 +577,8 @@ static inline unsigned ublk_pos_to_tag(loff_t pos)
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
- struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
- blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
- p->attrs & UBLK_ATTR_FUA);
- if (p->attrs & UBLK_ATTR_ROTATIONAL)
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
@@ -540,6 +625,28 @@ static int ublk_validate_params(const struct ublk_device *ub)
else if (ublk_dev_is_zoned(ub))
return -EINVAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) {
+ const struct ublk_param_dma_align *p = &ub->params.dma;
+
+ if (p->alignment >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(p->alignment + 1))
+ return -EINVAL;
+ }
+
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ const struct ublk_param_segment *p = &ub->params.seg;
+
+ if (!is_power_of_2(p->seg_boundary_mask + 1))
+ return -EINVAL;
+
+ if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -551,52 +658,100 @@ static void ublk_apply_params(struct ublk_device *ub)
ublk_dev_param_zoned_apply(ub);
}
+static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
+static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
+static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_AUTO_BUF_REG;
+}
+
+static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_USER_COPY;
+}
+
+static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
+{
+ return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
+ !ublk_support_auto_buf_reg(ubq);
+}
+
+static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
+{
+ return !ublk_dev_support_user_copy(ub) &&
+ !ublk_dev_support_zero_copy(ub) &&
+ !ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
* read()/write() is involved in user copy, so request reference
* has to be grabbed
+ *
+	 * For zero copy, the request buffer needs to be registered in the
+	 * io_uring buffer table, so a reference is needed
+	 *
+	 * For auto buffer register, the ublk server may still issue
+	 * UBLK_IO_COMMIT_AND_FETCH_REQ before a registered buffer is used up,
+	 * so a reference is required too.
*/
- return ublk_support_user_copy(ubq);
+ return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
+ ublk_support_auto_buf_reg(ubq);
}
-static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ return ublk_dev_support_user_copy(ub) ||
+ ublk_dev_support_zero_copy(ub) ||
+ ublk_dev_support_auto_buf_reg(ub);
+}
- kref_init(&data->ref);
- }
+static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
+ struct ublk_io *io)
+{
+ if (ublk_need_req_ref(ubq))
+ refcount_set(&io->ref, UBLK_REFCOUNT_INIT);
}
-static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_get_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ return refcount_inc_not_zero(&io->ref);
+}
- return kref_get_unless_zero(&data->ref);
- }
+static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
+{
+ if (!refcount_dec_and_test(&io->ref))
+ return;
- return true;
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
-static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_sub_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
- kref_put(&data->ref, ublk_complete_rq);
- } else {
- __ublk_complete_rq(req);
- }
+ io->task_registered_buffers = 0;
+ return refcount_sub_and_test(sub_refs, &io->ref);
}
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
@@ -604,6 +759,11 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
+static inline bool ublk_dev_need_get_data(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_NEED_GET_DATA;
+}
+
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
@@ -621,7 +781,7 @@ static noinline void ublk_put_device(struct ublk_device *ub)
static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
int qid)
{
- return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+ return dev->queues[qid];
}
static inline bool ublk_rq_has_data(const struct request *rq)
@@ -629,42 +789,73 @@ static inline bool ublk_rq_has_data(const struct request *rq)
return bio_has_data(rq->bio);
}
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag)
+static inline struct ublksrv_io_desc *
+ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
- return (struct ublksrv_io_desc *)
- &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+ return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
-static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+static inline int __ublk_queue_cmd_buf_size(int depth)
{
- return ublk_get_queue(ub, q_id)->io_cmd_buf;
+ return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}
-static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth);
+}
- return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
- PAGE_SIZE);
+static int ublk_max_cmd_buf_size(void)
+{
+ return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
}
-static inline bool ublk_queue_can_use_recovery_reissue(
- struct ublk_queue *ubq)
+/*
+ * Should I/O that is outstanding to the ublk server be reissued when the
+ * server exits? If not, outstanding I/O will get errors.
+ */
+static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
+}
+
+/*
+ * Should I/O issued while there is no ublk server be queued? If not, I/O
+ * issued while there is no ublk server will get errors.
+ */
+static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
+}
+
+/*
+ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
+ * of the device flags for smaller cache footprint - better for fast
+ * paths.
+ */
+static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
{
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
+ !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery(
- struct ublk_queue *ubq)
+/*
+ * Should ublk devices be stopped (i.e. no recovery possible) when the
+ * ublk server exits? If not, devices can be used again by a future
+ * incarnation of a ublk server via the start_recovery/end_recovery
+ * commands.
+ */
+static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
{
- return ubq->flags & UBLK_F_USER_RECOVERY;
+ return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
}
-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
{
- return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+ return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
+ ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
}
static void ublk_free_disk(struct gendisk *disk)
@@ -723,73 +914,6 @@ static const struct block_device_operations ub_fops = {
.report_zones = ublk_report_zones,
};
-#define UBLK_MAX_PIN_PAGES 32
-
-struct ublk_io_iter {
- struct page *pages[UBLK_MAX_PIN_PAGES];
- struct bio *bio;
- struct bvec_iter iter;
-};
-
-/* return how many pages are copied */
-static void ublk_copy_io_pages(struct ublk_io_iter *data,
- size_t total, size_t pg_off, int dir)
-{
- unsigned done = 0;
- unsigned pg_idx = 0;
-
- while (done < total) {
- struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
- (unsigned)(PAGE_SIZE - pg_off));
- void *bv_buf = bvec_kmap_local(&bv);
- void *pg_buf = kmap_local_page(data->pages[pg_idx]);
-
- if (dir == ITER_DEST)
- memcpy(pg_buf + pg_off, bv_buf, bytes);
- else
- memcpy(bv_buf, pg_buf + pg_off, bytes);
-
- kunmap_local(pg_buf);
- kunmap_local(bv_buf);
-
- /* advance page array */
- pg_off += bytes;
- if (pg_off == PAGE_SIZE) {
- pg_idx += 1;
- pg_off = 0;
- }
-
- done += bytes;
-
- /* advance bio */
- bio_advance_iter_single(data->bio, &data->iter, bytes);
- if (!data->iter.bi_size) {
- data->bio = data->bio->bi_next;
- if (data->bio == NULL)
- break;
- data->iter = data->bio->bi_iter;
- }
- }
-}
-
-static bool ublk_advance_io_iter(const struct request *req,
- struct ublk_io_iter *iter, unsigned int offset)
-{
- struct bio *bio = req->bio;
-
- for_each_bio(bio) {
- if (bio->bi_iter.bi_size > offset) {
- iter->bio = bio;
- iter->iter = bio->bi_iter;
- bio_advance_iter(iter->bio, &iter->iter, offset);
- return true;
- }
- offset -= bio->bi_iter.bi_size;
- }
- return false;
-}
-
/*
* Copy data between request pages and io_iter, and 'offset'
* is the start point of linear offset of request.
@@ -797,34 +921,35 @@ static bool ublk_advance_io_iter(const struct request *req,
static size_t ublk_copy_user_pages(const struct request *req,
unsigned offset, struct iov_iter *uiter, int dir)
{
- struct ublk_io_iter iter;
+ struct req_iterator iter;
+ struct bio_vec bv;
size_t done = 0;
- if (!ublk_advance_io_iter(req, &iter, offset))
- return 0;
+ rq_for_each_segment(bv, req, iter) {
+ void *bv_buf;
+ size_t copied;
- while (iov_iter_count(uiter) && iter.bio) {
- unsigned nr_pages;
- ssize_t len;
- size_t off;
- int i;
-
- len = iov_iter_get_pages2(uiter, iter.pages,
- iov_iter_count(uiter),
- UBLK_MAX_PIN_PAGES, &off);
- if (len <= 0)
- return done;
-
- ublk_copy_io_pages(&iter, len, off, dir);
- nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
- for (i = 0; i < nr_pages; i++) {
- if (dir == ITER_DEST)
- set_page_dirty(iter.pages[i]);
- put_page(iter.pages[i]);
+ if (offset >= bv.bv_len) {
+ offset -= bv.bv_len;
+ continue;
}
- done += len;
- }
+ bv.bv_offset += offset;
+ bv.bv_len -= offset;
+ bv_buf = bvec_kmap_local(&bv);
+ if (dir == ITER_DEST)
+ copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+ else
+ copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+
+ kunmap_local(bv_buf);
+
+ done += copied;
+ if (copied < bv.bv_len)
+ break;
+
+ offset = 0;
+ }
return done;
}
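The rewritten ublk_copy_user_pages() walks request segments instead of pinning user pages. Fixed to the read direction for brevity, the core loop reduces to roughly:

	struct req_iterator iter;
	struct bio_vec bv;
	size_t done = 0;

	rq_for_each_segment(bv, req, iter) {
		void *p = bvec_kmap_local(&bv);
		size_t n = copy_to_iter(p, bv.bv_len, uiter);

		kunmap_local(p);
		done += n;
		if (n < bv.bv_len)
			break;		/* the iov_iter ran out */
	}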
@@ -839,12 +964,13 @@ static inline bool ublk_need_unmap_req(const struct request *req)
(req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
}
-static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
- struct ublk_io *io)
+static unsigned int ublk_map_io(const struct ublk_queue *ubq,
+ const struct request *req,
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
/*
@@ -856,19 +982,19 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct iov_iter iter;
const int dir = ITER_DEST;
- import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static unsigned int ublk_unmap_io(bool need_map,
const struct request *req,
- struct ublk_io *io)
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -877,7 +1003,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
WARN_ON_ONCE(io->res > rq_bytes);
- import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
@@ -915,13 +1041,8 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- enum req_op op = req_op(req);
u32 ublk_op;
- if (!ublk_queue_is_zoned(ubq) &&
- (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
- return BLK_STS_IOERR;
-
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
@@ -948,7 +1069,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
- iod->addr = io->addr;
+ iod->addr = io->buf.addr;
return BLK_STS_OK;
}
@@ -956,28 +1077,16 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
- return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
-}
-
-static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
-{
- return ubq->ubq_daemon->flags & PF_EXITING;
+ return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
- /* called from ublk_abort_queue() code path */
- if (io->flags & UBLK_IO_FLAG_ABORTED) {
- res = BLK_STS_IOERR;
- goto exit;
- }
-
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -998,7 +1107,7 @@ static inline void __ublk_complete_rq(struct request *req)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Extremely impossible since we got data filled in just before
@@ -1010,7 +1119,7 @@ static inline void __ublk_complete_rq(struct request *req)
if (blk_update_request(req, BLK_STS_OK, io->res))
blk_mq_requeue_request(req, true);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
__blk_mq_end_request(req, BLK_STS_OK);
return;
@@ -1018,37 +1127,12 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_rq(struct kref *ref)
+static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
+ struct request *req)
{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
+ /* read cmd first because req will overwrite it */
+ struct io_uring_cmd *cmd = io->cmd;
- __ublk_complete_rq(req);
-}
-
-/*
- * Since __ublk_rq_task_work always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
- struct request *req)
-{
- WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
-
- if (ublk_queue_can_use_recovery_reissue(ubq))
- blk_mq_requeue_request(req, false);
- else
- ublk_put_req_ref(ubq, req);
-}
-
-static void ubq_complete_io_cmd(struct ublk_io *io, int res,
- unsigned issue_flags)
-{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -1058,8 +1142,17 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res,
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ io->req = req;
+ return cmd;
+}
+
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
+{
+ struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
+
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
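Note the ordering enforced by __ublk_prep_compl_io_cmd() above: io->cmd and io->req share a union, so the cmd pointer has to be loaded before the request is stored into the same slot. In miniature:

	struct io_uring_cmd *cmd = io->cmd;	/* load before overwriting */

	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
	io->req = req;				/* reuses io->cmd's storage */
	io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags);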
@@ -1068,66 +1161,78 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
struct request *rq)
{
/* We cannot process this rq so just requeue it. */
- if (ublk_queue_can_use_recovery(ubq))
+ if (ublk_nosrv_dev_should_queue_io(ubq->dev))
blk_mq_requeue_request(rq, false);
else
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static inline void __ublk_rq_task_work(struct request *req,
- unsigned issue_flags)
+static void
+ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, unsigned tag)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- int tag = req->tag;
- struct ublk_io *io = &ubq->ios[tag];
- unsigned int mapped_bytes;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
- pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+}
- /*
- * Task is exiting if either:
- *
- * (1) current != ubq_daemon.
- * io_uring_cmd_complete_in_task() tries to run task_work
- * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
- *
- * (2) current->flags & PF_EXITING.
- */
- if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
- __ublk_abort_rq(ubq, req);
- return;
+enum auto_buf_reg_res {
+ AUTO_BUF_REG_FAIL,
+ AUTO_BUF_REG_FALLBACK,
+ AUTO_BUF_REG_OK,
+};
+
+static void ublk_prep_auto_buf_reg_io(const struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ enum auto_buf_reg_res res)
+{
+ if (res == AUTO_BUF_REG_OK) {
+ io->task_registered_buffers = 1;
+ io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
}
+ ublk_init_req_ref(ubq, io);
+ __ublk_prep_compl_io_cmd(io, req);
+}
- if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
- /*
- * We have not handled UBLK_IO_NEED_GET_DATA command yet,
- * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
- * and notify it.
- */
- if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
- io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
- pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
- __func__, io->cmd->cmd_op, ubq->q_id,
- req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
- return;
+static enum auto_buf_reg_res
+__ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ int ret;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release,
+ io->buf.auto_reg.index, issue_flags);
+ if (ret) {
+ if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(ubq, req->tag);
+ return AUTO_BUF_REG_FALLBACK;
}
- /*
- * We have handled UBLK_IO_NEED_GET_DATA command,
- * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
- * do the copy work.
- */
- io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
- /* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
- pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ return AUTO_BUF_REG_FAIL;
+ }
+
+ return AUTO_BUF_REG_OK;
+}
+
+static void ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ enum auto_buf_reg_res res = __ublk_do_auto_buf_reg(ubq, req, io, cmd,
+ issue_flags);
+
+ if (res != AUTO_BUF_REG_FAIL) {
+ ublk_prep_auto_buf_reg_io(ubq, req, io, cmd, res);
+ io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags);
}
+}
- mapped_bytes = ublk_map_io(ubq, req, io);
+static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io)
+{
+ unsigned mapped_bytes = ublk_map_io(ubq, req, io);
/* partially mapped, update io descriptor */
if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
@@ -1142,99 +1247,137 @@ static inline void __ublk_rq_task_work(struct request *req,
blk_mq_requeue_request(req, false);
blk_mq_delay_kick_requeue_list(req->q,
UBLK_REQUEUE_DELAY_MS);
- return;
+ return false;
}
ublk_get_iod(ubq, req->tag)->nr_sectors =
mapped_bytes >> 9;
}
- ublk_init_req_ref(ubq, req);
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ return true;
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
- unsigned issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req)
{
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data, *tmp;
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+ int tag = req->tag;
+ struct ublk_io *io = &ubq->ios[tag];
+
+ pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ /*
+ * Task is exiting if either:
+ *
+ * (1) current != io->task.
+ * io_uring_cmd_complete_in_task() tries to run task_work
+ * in a workqueue if cmd's task is PF_EXITING.
+ *
+ * (2) current->flags & PF_EXITING.
+ */
+ if (unlikely(current != io->task || current->flags & PF_EXITING)) {
+ __ublk_abort_rq(ubq, req);
+ return;
+ }
+
+ if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
+ /*
+ * We have not handled UBLK_IO_NEED_GET_DATA command yet,
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * and notify it.
+ */
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
+ __func__, ubq->q_id, req->tag, io->flags);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
+ issue_flags);
+ return;
+ }
+
+ if (!ublk_start_io(ubq, req, io))
+ return;
- io_cmds = llist_reverse_order(io_cmds);
- llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
+ ublk_do_auto_buf_reg(ubq, req, io, io->cmd, issue_flags);
+ } else {
+ ublk_init_req_ref(ubq, io);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
+ }
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+static void ublk_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_forward_io_cmds(ubq, issue_flags);
+ ublk_dispatch_req(ubq, pdu->req);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
- if (llist_add(&data->node, &ubq->io_cmds)) {
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
- }
+ pdu->req = rq;
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
}
-static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+static void ublk_cmd_list_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
- struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- unsigned int nr_inflight = 0;
- int i;
-
- if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
- if (!ubq->timeout) {
- send_sig(SIGKILL, ubq->ubq_daemon, 0);
- ubq->timeout = true;
- }
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct request *rq = pdu->req_list;
+ struct request *next;
- return BLK_EH_DONE;
- }
+ do {
+ next = rq->rq_next;
+ rq->rq_next = NULL;
+ ublk_dispatch_req(rq->mq_hctx->driver_data, rq);
+ rq = next;
+ } while (rq);
+}
- if (!ubq_daemon_is_dying(ubq))
- return BLK_EH_RESET_TIMER;
+static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l)
+{
+ struct io_uring_cmd *cmd = io->cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
+ pdu->req_list = rq_list_peek(l);
+ rq_list_init(l);
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
+}
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
- nr_inflight++;
- }
+static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+{
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ pid_t tgid = ubq->dev->ublksrv_tgid;
+ struct task_struct *p;
+ struct pid *pid;
- /* cancelable uring_cmd can't help us if all commands are in-flight */
- if (nr_inflight == ubq->q_depth) {
- struct ublk_device *ub = ubq->dev;
+ if (!(ubq->flags & UBLK_F_UNPRIVILEGED_DEV))
+ return BLK_EH_RESET_TIMER;
- if (ublk_abort_requests(ub, ubq)) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
- }
- return BLK_EH_DONE;
- }
+ if (unlikely(!tgid))
+ return BLK_EH_RESET_TIMER;
- return BLK_EH_RESET_TIMER;
+ rcu_read_lock();
+ pid = find_vpid(tgid);
+ p = pid_task(pid, PIDTYPE_PID);
+ if (p)
+ send_sig(SIGKILL, p, 0);
+ rcu_read_unlock();
+ return BLK_EH_DONE;
}
-static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+ bool check_cancel)
{
- struct ublk_queue *ubq = hctx->driver_data;
- struct request *rq = bd->rq;
blk_status_t res;
- /* fill iod to slot in io cmd buffer */
- res = ublk_setup_iod(ubq, rq);
- if (unlikely(res != BLK_STS_OK))
- return BLK_STS_IOERR;
+ if (unlikely(READ_ONCE(ubq->fail_io)))
+ return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
@@ -1245,20 +1388,83 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
* Note: force_abort is guaranteed to be seen because it is set
	 * before request queue is unquiesced.
*/
- if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) &&
+ unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;
+ if (check_cancel && unlikely(ubq->canceling))
+ return BLK_STS_IOERR;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+ return BLK_STS_OK;
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ res = ublk_prep_req(ubq, rq, false);
+ if (res != BLK_STS_OK)
+ return res;
+
+ /*
+	 * ->canceling has to be handled after ->force_abort and ->fail_io
+	 * are dealt with, otherwise this request may not be failed in case
+	 * of recovery, causing a hang when deleting the disk
+ */
if (unlikely(ubq->canceling)) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
- blk_mq_start_request(bd->rq);
ublk_queue_cmd(ubq, rq);
-
return BLK_STS_OK;
}
+static inline bool ublk_belong_to_same_batch(const struct ublk_io *io,
+ const struct ublk_io *io2)
+{
+ return (io_uring_cmd_ctx_handle(io->cmd) ==
+ io_uring_cmd_ctx_handle(io2->cmd)) &&
+ (io->task == io2->task);
+}
+
+static void ublk_queue_rqs(struct rq_list *rqlist)
+{
+ struct rq_list requeue_list = { };
+ struct rq_list submit_list = { };
+ struct ublk_io *io = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct ublk_queue *this_q = req->mq_hctx->driver_data;
+ struct ublk_io *this_io = &this_q->ios[req->tag];
+
+ if (ublk_prep_req(this_q, req, true) != BLK_STS_OK) {
+ rq_list_add_tail(&requeue_list, req);
+ continue;
+ }
+
+ if (io && !ublk_belong_to_same_batch(io, this_io) &&
+ !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(io, &submit_list);
+ io = this_io;
+ rq_list_add_tail(&submit_list, req);
+ }
+
+ if (!rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(io, &submit_list);
+ *rqlist = requeue_list;
+}
+
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -1271,10 +1477,42 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
+ .queue_rqs = ublk_queue_rqs,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /*
+		 * UBLK_IO_FLAG_CANCELED is kept to avoid touching
+		 * io->cmd
+ */
+ io->flags &= UBLK_IO_FLAG_CANCELED;
+ io->cmd = NULL;
+ io->buf.addr = 0;
+
+ /*
+		 * The old task is PF_EXITING, so put it now.
+		 *
+		 * It could be NULL when closing a quiesced
+		 * device.
+ */
+ if (io->task) {
+ put_task_struct(io->task);
+ io->task = NULL;
+ }
+
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
+ }
+}
+
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1283,14 +1521,209 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
return -EBUSY;
filp->private_data = ub;
+ ub->ublksrv_tgid = current->tgid;
return 0;
}
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+ /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_io_ready = 0;
+ ub->unprivileged_daemons = false;
+ ub->ublksrv_tgid = -1;
+}
+
+static struct gendisk *ublk_get_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ if (disk)
+ get_device(disk_to_dev(disk));
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_put_disk(struct gendisk *disk)
+{
+ if (disk)
+ put_device(disk_to_dev(disk));
+}
+
+/*
+ * Use this function to ensure that ->canceling is consistently set for
+ * the device and all queues. Do not set these flags directly.
+ *
+ * Caller must ensure that:
+ * - cancel_mutex is held. This ensures that there is no concurrent
+ * access to ub->canceling and no concurrent writes to ubq->canceling.
+ * - there are no concurrent reads of ubq->canceling from the queue_rq
+ * path. This can be done by quiescing the queue, or through other
+ * means.
+ */
+static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
+ __must_hold(&ub->cancel_mutex)
+{
+ int i;
+
+ ub->canceling = canceling;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->canceling = canceling;
+}
+
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
+{
+ int i, j;
+
+ if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG)))
+ return false;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ for (j = 0; j < ubq->q_depth; j++) {
+ struct ublk_io *io = &ubq->ios[j];
+ unsigned int refs = refcount_read(&io->ref) +
+ io->task_registered_buffers;
+
+ /*
+ * UBLK_REFCOUNT_INIT or zero means no active
+ * reference
+ */
+ if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+ return true;
+
+			/* reset to zero if the io has no active references */
+ refcount_set(&io->ref, 0);
+ io->task_registered_buffers = 0;
+ }
+ }
+ return false;
+}
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, exit_work.work);
+ struct gendisk *disk;
+ int i;
+
+ /*
+	 * For zero-copy and auto buffer register modes, I/O references
+	 * might not be dropped naturally when the daemon is killed, but
+	 * io_uring guarantees that registered bvec kernel buffers are
+	 * finally unregistered when the io_uring context is freed, at
+	 * which point the active references are dropped.
+	 *
+	 * Wait until the active references are dropped, to avoid use-after-free.
+	 *
+	 * A registered buffer may be unregistered in io_uring's release
+	 * handler, so we have to wait by rescheduling this work function,
+	 * avoiding a dependency between the two file releases.
+ */
+ if (ublk_check_and_reset_active_ref(ub)) {
+ schedule_delayed_work(&ub->exit_work, 1);
+ return;
+ }
+
+ /*
+	 * The disk isn't attached yet: either the device isn't live, or it
+	 * has been removed already, so there is nothing to do.
+ */
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto out;
+
+ /*
+	 * All uring_cmds are done now, so abort any request outstanding to
+	 * the ublk server.
+	 *
+	 * This can be done locklessly because the ublk server is gone.
+	 *
+	 * More importantly, we have to guarantee forward progress without
+	 * holding ub->mutex, otherwise a control task grabbing ub->mutex
+	 * would trigger a deadlock.
+	 *
+	 * All requests may be inflight, so ->canceling may not be set; set
+	 * it now.
+ */
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, true);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_abort_queue(ub, ublk_get_queue(ub, i));
+ mutex_unlock(&ub->cancel_mutex);
+ blk_mq_kick_requeue_list(disk->queue);
+
+ /*
+	 * All inflight requests have been completed or requeued, and any new
+	 * request will be failed or requeued via `->canceling`, so it is
+	 * fine to grab ub->mutex now.
+ */
+ mutex_lock(&ub->mutex);
+
+ /* double check after grabbing lock */
+ if (!ub->ub_disk)
+ goto unlock;
+
+ /*
+ * Transition the device to the nosrv state. What exactly this
+ * means depends on the recovery flags
+ */
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ /*
+ * Allow any pending/future I/O to pass through quickly
+ * with an error. This is needed because del_gendisk
+ * waits for all pending I/O to complete
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
+
+ ublk_stop_dev_unlocked(ub);
+ } else {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ /* ->canceling is set and all requests are aborted */
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ } else {
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
+ }
+ }
+unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_disk(disk);
+
+	/* all uring_cmds are done now, reset device & ubq */
+ ublk_reset_ch_dev(ub);
+out:
+ clear_bit(UB_STATE_OPEN, &ub->state);
+
+ /* put the reference grabbed in ublk_ch_release() */
+ ublk_put_device(ub);
+}
+
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
- clear_bit(UB_STATE_OPEN, &ub->state);
+ /*
+	 * Grab a ublk device reference, so it won't go away until we are
+	 * really released from the work function.
+ */
+ ublk_get_device(ub);
+
+ INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+ schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
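The release path above defers all teardown to a delayed work item that reschedules itself while registered buffers remain. The skeleton of that pattern, with a hypothetical predicate:

	static void release_work_fn(struct work_struct *work)
	{
		struct my_dev *dev =
			container_of(work, struct my_dev, exit_work.work);

		if (still_has_active_refs(dev)) {	/* hypothetical check */
			schedule_delayed_work(&dev->exit_work, 1);
			return;
		}
		/* ... teardown is now safe ... */
	}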
@@ -1299,7 +1732,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ublk_device *ub = filp->private_data;
size_t sz = vma->vm_end - vma->vm_start;
- unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned max_sz = ublk_max_cmd_buf_size();
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
int q_id, ret = 0;
@@ -1325,41 +1758,33 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
- if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ if (sz != ublk_queue_cmd_buf_size(ub))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void ublk_commit_completion(struct ublk_device *ub,
- const struct ublksrv_io_cmd *ub_cmd)
+static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
+ struct request *req)
{
- u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
- struct ublk_queue *ubq = ublk_get_queue(ub, qid);
- struct ublk_io *io = &ubq->ios[tag];
- struct request *req;
-
- /* now this cmd slot is owned by nbd driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- /* find the io request and complete */
- req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
- if (WARN_ON_ONCE(unlikely(!req)))
- return;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
+ if (ublk_nosrv_should_reissue_outstanding(ub))
+ blk_mq_requeue_request(req, false);
+ else {
+ io->res = -EIO;
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+ }
}
/*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from the ublk char device release handler, once every uring_cmd
+ * is done; meanwhile the request queue is "quiesced" since all inflight
+ * requests can't be completed because the ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more; simply ignore the
+ * reference and complete the request immediately.
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -1368,62 +1793,62 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
- struct request *rq;
-
- /*
- * Either we fail the request or ublk_rq_task_work_fn
- * will do it
- */
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq)) {
- io->flags |= UBLK_IO_FLAG_ABORTED;
- __ublk_fail_req(ubq, io, rq);
- }
- }
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ __ublk_fail_req(ub, io, io->req);
}
}
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_device *ub)
{
- struct gendisk *disk;
-
- spin_lock(&ubq->cancel_lock);
- if (ubq->canceling) {
- spin_unlock(&ubq->cancel_lock);
- return false;
- }
- ubq->canceling = true;
- spin_unlock(&ubq->cancel_lock);
-
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- if (disk)
- get_device(disk_to_dev(disk));
- spin_unlock(&ub->lock);
+ struct gendisk *disk = ublk_get_disk(ub);
/* Our disk has been dead */
if (!disk)
- return false;
+ return;
- /* Now we are serialized with ublk_queue_rq() */
+ mutex_lock(&ub->cancel_mutex);
+ if (ub->canceling)
+ goto out;
+ /*
+ * Now we are serialized with ublk_queue_rq()
+ *
+ * Make sure that ubq->canceling is set while the queue is quiesced,
+ * because ublk_queue_rq() has to rely on this flag to avoid touching
+ * a completed uring_cmd
+ */
blk_mq_quiesce_queue(disk->queue);
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
+ ublk_set_canceling(ub, true);
blk_mq_unquiesce_queue(disk->queue);
- put_device(disk_to_dev(disk));
-
- return true;
+out:
+ mutex_unlock(&ub->cancel_mutex);
+ ublk_put_disk(disk);
}
-static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
+static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
unsigned int issue_flags)
{
+ struct ublk_io *io = &ubq->ios[tag];
+ struct ublk_device *ub = ubq->dev;
+ struct request *req;
bool done;
if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
return;
+ /*
+ * Don't try to cancel this command if the request has been started,
+ * to avoid a race between io_uring_cmd_done() and
+ * io_uring_cmd_complete_in_task().
+ *
+ * Either the started request will be aborted via __ublk_abort_rq(),
+ * and this uring_cmd canceled next time, or it will be done in the
+ * task work function ublk_dispatch_req(), because io_uring guarantees
+ * that ublk_dispatch_req() is always called
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ if (req && blk_mq_request_started(req) && req->tag == tag)
+ return;
+
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
if (!done)
@@ -1431,12 +1856,23 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
spin_unlock(&ubq->cancel_lock);
if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
* The ublk char device won't be closed when calling cancel fn, so both
* ublk device and queue are guaranteed to be live
+ *
+ * Two-stage cancel:
+ *
+ * - make every active uring_cmd done in ->cancel_fn()
+ *
+ * - abort inflight ublk IO requests in the ublk char device release
+ *   handler, which depends on the 1st stage because the device can
+ *   only be closed after all uring_cmds are done
+ *
+ * Do _not_ try to acquire ub->mutex before all inflight requests are
+ * aborted, otherwise a deadlock may result.
*/
static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
unsigned int issue_flags)
@@ -1444,8 +1880,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
struct task_struct *task;
- struct ublk_device *ub;
- bool need_schedule;
struct ublk_io *io;
if (WARN_ON_ONCE(!ubq))
@@ -1455,27 +1889,21 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
return;
task = io_uring_cmd_get_task(cmd);
- if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
+ io = &ubq->ios[pdu->tag];
+ if (WARN_ON_ONCE(task && task != io->task))
return;
- ub = ubq->dev;
- need_schedule = ublk_abort_requests(ub, ubq);
+ ublk_start_cancel(ubq->dev);
- io = &ubq->ios[pdu->tag];
WARN_ON_ONCE(io->cmd != cmd);
- ublk_cancel_cmd(ubq, io, issue_flags);
-
- if (need_schedule) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
- }
+ ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+static inline bool ublk_dev_ready(const struct ublk_device *ub)
{
- return ubq->nr_io_ready == ubq->q_depth;
+ u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth;
+
+ return ub->nr_io_ready == total;
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
@@ -1483,7 +1911,7 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
int i;
for (i = 0; i < ubq->q_depth; i++)
- ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
+ ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED);
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -1521,66 +1949,29 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
}
}
-static void __ublk_quiesce_dev(struct ublk_device *ub)
-{
- pr_devel("%s: quiesce ub: dev_id %d state %s\n",
- __func__, ub->dev_info.dev_id,
- ub->dev_info.state == UBLK_S_DEV_LIVE ?
- "LIVE" : "QUIESCED");
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ublk_wait_tagset_rqs_idle(ub);
- ub->dev_info.state = UBLK_S_DEV_QUIESCED;
-}
-
-static void ublk_quiesce_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, quiesce_work);
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
- __ublk_quiesce_dev(ub);
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
-}
-
-static void ublk_unquiesce_dev(struct ublk_device *ub)
+static void ublk_force_abort_dev(struct ublk_device *ub)
{
int i;
- pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ pr_devel("%s: force abort ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
- /* quiesce_work has run. We let requeued rqs be aborted
- * before running fallback_wq. "force_abort" must be seen
- * after request queue is unqiuesced. Then del_gendisk()
- * can move on.
- */
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ ublk_wait_tagset_rqs_idle(ub);
+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
-
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
{
struct gendisk *disk;
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_DEAD)
- goto unlock;
- if (ublk_can_use_recovery(ub)) {
- if (ub->dev_info.state == UBLK_S_DEV_LIVE)
- __ublk_quiesce_dev(ub);
- ublk_unquiesce_dev(ub);
- }
- del_gendisk(ub->ub_disk);
-
/* Sync with ublk_abort_queue() by holding the lock */
spin_lock(&ub->lock);
disk = ub->ub_disk;
@@ -1588,37 +1979,66 @@ static void ublk_stop_dev(struct ublk_device *ub)
ub->dev_info.ublksrv_pid = -1;
ub->ub_disk = NULL;
spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_stop_dev_unlocked(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
+{
+ struct gendisk *disk;
+
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ return;
+
+ if (ublk_nosrv_dev_should_queue_io(ub))
+ ublk_force_abort_dev(ub);
+ del_gendisk(ub->ub_disk);
+ disk = ublk_detach_disk(ub);
put_disk(disk);
- unlock:
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
-/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
{
- mutex_lock(&ub->mutex);
- ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq)) {
- ubq->ubq_daemon = current;
- get_task_struct(ubq->ubq_daemon);
- ub->nr_queues_ready++;
+ int i, j;
- if (capable(CAP_SYS_ADMIN))
- ub->nr_privileged_daemon++;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ /* UBLK_IO_FLAG_CANCELED can be cleared now */
+ spin_lock(&ubq->cancel_lock);
+ for (j = 0; j < ubq->q_depth; j++)
+ ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+ spin_unlock(&ubq->cancel_lock);
+ ubq->fail_io = false;
}
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
- complete_all(&ub->completion);
- mutex_unlock(&ub->mutex);
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, false);
+ mutex_unlock(&ub->cancel_mutex);
}
-static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
- int tag)
+/* device can only be started after all IOs are ready */
+static void ublk_mark_io_ready(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+ ub->unprivileged_daemons = true;
- ublk_queue_cmd(ubq, req);
+ ub->nr_io_ready++;
+ if (ublk_dev_ready(ub)) {
+ /* now we are ready for handling ublk io request */
+ ublk_reset_io_flags(ub);
+ complete_all(&ub->completion);
+ }
}
static inline int ublk_check_cmd_op(u32 cmd_op)
@@ -1634,12 +2054,69 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
return 0;
}
-static inline void ublk_fill_io_cmd(struct ublk_io *io,
- struct io_uring_cmd *cmd, unsigned long buf_addr)
+static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
{
+ struct ublk_auto_buf_reg buf;
+
+ buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (buf.reserved0 || buf.reserved1)
+ return -EINVAL;
+
+ if (buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ io->buf.auto_reg = buf;
+ return 0;
+}
+
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
+{
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+
+ /*
+ * `UBLK_F_AUTO_BUF_REG` only works if `UBLK_IO_FETCH_REQ`
+ * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the same
+ * `io_ring_ctx`.
+ *
+ * If this uring_cmd's io_ring_ctx isn't the same as the one
+ * used for registering the buffer, it is the ublk server's
+ * responsibility to unregister the buffer; otherwise this
+ * ublk request gets stuck.
+ */
+ if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ *buf_idx = io->buf.auto_reg.index;
+ }
+
+ return ublk_set_auto_buf_reg(io, cmd);
+}
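
On the ublk server side, the registration descriptor decoded by ublk_sqe_addr_to_auto_buf_reg() above travels in the uring_cmd's addr field: buffer index in bits 0-15, flags in bits 16-23. A sketch of filling a COMMIT_AND_FETCH SQE with automatic buffer registration, assuming liburing plus the struct ublk_auto_buf_reg helpers exported by the <linux/ublk_cmd.h> UAPI header:

    #include <liburing.h>
    #include <linux/ublk_cmd.h>
    #include <string.h>

    /* hypothetical helper; ring setup and opening char_fd are elided */
    static void queue_commit_and_fetch(struct io_uring *ring, int char_fd,
                                       __u16 q_id, __u16 tag, __s32 res,
                                       __u16 buf_idx)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct ublksrv_io_cmd *cmd;
            struct ublk_auto_buf_reg reg = {
                    .index = buf_idx,
                    .flags = 0,             /* or UBLK_AUTO_BUF_REG_FALLBACK */
            };

            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_URING_CMD;
            sqe->fd = char_fd;                      /* /dev/ublkcN */
            sqe->cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ;
            cmd = (struct ublksrv_io_cmd *)sqe->cmd;
            cmd->q_id = q_id;
            cmd->tag = tag;
            cmd->result = res;
            /* packed form that ublk_sqe_addr_to_auto_buf_reg() decodes */
            cmd->addr = ublk_auto_buf_reg_to_sqe_addr(&reg);
    }

If the buffer is still registered when COMMIT_AND_FETCH arrives from the same io_ring_ctx, the kernel unregisters it itself via the buf_idx that ublk_handle_auto_buf_reg() hands back.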
+
+/* Once we return, `io->req` can't be used any more */
+static inline struct request *
+ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
+{
+ struct request *req = io->req;
+
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
- io->addr = buf_addr;
+ /* now this cmd slot is owned by the ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ return req;
+}
+
+static inline int
+ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
+ struct io_uring_cmd *cmd, unsigned long buf_addr,
+ u16 *buf_idx)
+{
+ if (ublk_dev_support_auto_buf_reg(ub))
+ return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
+
+ io->buf.addr = buf_addr;
+ return 0;
}
static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
@@ -1657,39 +2134,265 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
-static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
- unsigned int issue_flags,
- const struct ublksrv_io_cmd *ub_cmd)
+static void ublk_io_release(void *priv)
{
+ struct request *rq = priv;
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[rq->tag];
+
+ /*
+ * task_registered_buffers may be 0 if buffers were registered off task
+ * but unregistered on task, or after UBLK_IO_COMMIT_AND_FETCH_REQ.
+ */
+ if (current == io->task && io->task_registered_buffers)
+ io->task_registered_buffers--;
+ else
+ ublk_put_req_ref(io, rq);
+}
+
+static int ublk_register_io_buf(struct io_uring_cmd *cmd,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag,
+ struct ublk_io *io,
+ unsigned int index, unsigned int issue_flags)
+{
+ struct request *req;
+ int ret;
+
+ if (!ublk_dev_support_zero_copy(ub))
+ return -EINVAL;
+
+ req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
+ if (!req)
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret) {
+ ublk_put_req_ref(io, req);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io,
+ unsigned index, unsigned issue_flags)
+{
+ unsigned new_registered_buffers;
+ struct request *req = io->req;
+ int ret;
+
+ /*
+ * Ensure there are still references for ublk_sub_req_ref() to release.
+ * If not, fall back on the thread-safe buffer registration.
+ */
+ new_registered_buffers = io->task_registered_buffers + 1;
+ if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+ issue_flags);
+
+ if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret)
+ return ret;
+
+ io->task_registered_buffers = new_registered_buffers;
+ return 0;
+}
+
+static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
+ const struct ublk_device *ub,
+ unsigned int index, unsigned int issue_flags)
+{
+ if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY))
+ return -EINVAL;
+
+ return io_buffer_unregister_bvec(cmd, index, issue_flags);
+}
+
+static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
+{
+ if (ublk_dev_need_map_io(ub)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!buf_addr && !ublk_dev_need_get_data(ub))
+ return -EINVAL;
+ } else if (buf_addr) {
+ /* User copy requires addr to be unset */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io)
+{
+ /* UBLK_IO_FETCH_REQ is only allowed before the dev is set up */
+ if (ublk_dev_ready(ub))
+ return -EBUSY;
+
+ /* allow each command to be FETCHed at most once */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE)
+ return -EINVAL;
+
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+ ublk_fill_io_cmd(io, cmd);
+
+ WRITE_ONCE(io->task, get_task_struct(current));
+ ublk_mark_io_ready(ub);
+
+ return 0;
+}
+
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ int ret;
+
+ /*
+ * When handling the FETCH command for setting up the ublk uring
+ * queue, ub->mutex is the innermost lock, and we won't block while
+ * handling FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ ret = __ublk_fetch(cmd, ub, io);
+ if (!ret)
+ ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
+ mutex_unlock(&ub->mutex);
+ return ret;
+}
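
Before UBLK_U_CMD_START_DEV can make progress, the server must FETCH every (q_id, tag) slot exactly once; ublk_mark_io_ready() completes ub->completion when the last one arrives. A sketch of the server's priming loop under the same liburing assumptions as above (io_buf() is a hypothetical per-tag buffer lookup; addr must stay 0 for user-copy and zero-copy modes):

    /* queue one FETCH per (q_id, tag) before starting the device */
    for (__u16 q = 0; q < nr_queues; q++) {
            for (__u16 tag = 0; tag < depth; tag++) {
                    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                    struct ublksrv_io_cmd *cmd;

                    memset(sqe, 0, sizeof(*sqe));
                    sqe->opcode = IORING_OP_URING_CMD;
                    sqe->fd = char_fd;              /* /dev/ublkcN */
                    sqe->cmd_op = UBLK_U_IO_FETCH_REQ;
                    cmd = (struct ublksrv_io_cmd *)sqe->cmd;
                    cmd->q_id = q;
                    cmd->tag = tag;
                    cmd->addr = (__u64)(uintptr_t)io_buf(q, tag);
            }
    }
    io_uring_submit(ring);

With UBLK_F_PER_IO_DAEMON (now always advertised), the task issuing each FETCH becomes that io's daemon via the WRITE_ONCE(io->task, ...) above, so different tags may legitimately be fetched from different threads.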
+
+static int ublk_check_commit_and_fetch(const struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ struct request *req = io->req;
+
+ if (ublk_dev_need_map_io(ub)) {
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+ * NEED GET DATA is not enabled or it is Read IO.
+ */
+ if (!buf_addr && (!ublk_dev_need_get_data(ub) ||
+ req_op(req) == REQ_OP_READ))
+ return -EINVAL;
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
+ /*
+ * User copy requires addr to be unset when command is
+ * not zone append
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool ublk_need_complete_req(const struct ublk_device *ub,
+ struct ublk_io *io)
+{
+ if (ublk_dev_need_req_ref(ub))
+ return ublk_sub_req_ref(io);
+ return true;
+}
+
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
+{
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->buf.addr;
+ pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ return ublk_start_io(ubq, req, io);
+}
+
+static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ /* May point to userspace-mapped memory */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+ u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
- struct ublk_io *io;
+ struct ublk_io *io = NULL;
u32 cmd_op = cmd->cmd_op;
- unsigned tag = ub_cmd->tag;
- int ret = -EINVAL;
+ u16 q_id = READ_ONCE(ub_src->q_id);
+ u16 tag = READ_ONCE(ub_src->tag);
+ s32 result = READ_ONCE(ub_src->result);
+ u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */
struct request *req;
+ int ret;
+ bool compl;
+
+ WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
- __func__, cmd->cmd_op, ub_cmd->q_id, tag,
- ub_cmd->result);
+ __func__, cmd->cmd_op, q_id, tag, result);
- if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ ret = ublk_check_cmd_op(cmd_op);
+ if (ret)
goto out;
- ubq = ublk_get_queue(ub, ub_cmd->q_id);
- if (!ubq || ub_cmd->q_id != ubq->q_id)
- goto out;
+ /*
+ * io_buffer_unregister_bvec() doesn't access the ubq or io,
+ * so no need to validate the q_id, tag, or task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
+ return ublk_unregister_io_buf(cmd, ub, addr, issue_flags);
- if (ubq->ubq_daemon && ubq->ubq_daemon != current)
+ ret = -EINVAL;
+ if (q_id >= ub->dev_info.nr_hw_queues)
goto out;
- if (tag >= ubq->q_depth)
+ ubq = ublk_get_queue(ub, q_id);
+
+ if (tag >= ub->dev_info.queue_depth)
goto out;
io = &ubq->ios[tag];
+ /* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
+ if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
+ ret = ublk_check_fetch_buf(ub, addr);
+ if (ret)
+ goto out;
+ ret = ublk_fetch(cmd, ub, io, addr);
+ if (ret)
+ goto out;
+
+ ublk_prep_cancel(cmd, issue_flags, ubq, tag);
+ return -EIOCBQUEUED;
+ }
+
+ if (READ_ONCE(io->task) != current) {
+ /*
+ * ublk_register_io_buf() accesses only the io's refcount,
+ * so can be handled on any task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io,
+ addr, issue_flags);
+
+ goto out;
+ }
/* there is pending io cmd, something must be wrong */
- if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) {
ret = -EBUSY;
goto out;
}
@@ -1702,72 +2405,43 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
goto out;
- ret = ublk_check_cmd_op(cmd_op);
- if (ret)
- goto out;
-
- ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
- case UBLK_IO_FETCH_REQ:
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
- /*
- * The io is being handled by server, so COMMIT_RQ is expected
- * instead of FETCH_REQ
- */
- if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- goto out;
-
- if (!ublk_support_user_copy(ubq)) {
- /*
- * FETCH_RQ has to provide IO buffer if NEED GET
- * DATA is not enabled
- */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
- } else if (ub_cmd->addr) {
- /* User copy requires addr to be unset */
- ret = -EINVAL;
- goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_mark_io_ready(ub, ubq);
- break;
+ case UBLK_IO_REGISTER_IO_BUF:
+ return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr,
+ issue_flags);
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
-
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ ret = ublk_check_commit_and_fetch(ub, io, addr);
+ if (ret)
goto out;
+ io->res = result;
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx);
+ compl = ublk_need_complete_req(ub, io);
+
+ /* can't touch 'ublk_io' any more */
+ if (buf_idx != UBLK_INVALID_BUF_IDX)
+ io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = addr;
+ if (compl)
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
- if (!ublk_support_user_copy(ubq)) {
- /*
- * COMMIT_AND_FETCH_REQ has to provide IO buffer if
- * NEED GET DATA is not enabled or it is Read IO.
- */
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
- req_op(req) == REQ_OP_READ))
- goto out;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
- /*
- * User copy requires addr to be unset when command is
- * not zone append
- */
- ret = -EINVAL;
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
+ /*
+ * ublk_get_data() may fail and fall back to requeue, so keep the
+ * uring_cmd active first and prepare for handling the newly requeued
+ * request
+ */
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, NULL);
+ WARN_ON_ONCE(ret);
+ if (likely(ublk_get_data(ubq, io, req))) {
+ __ublk_prep_compl_io_cmd(io, req);
+ return UBLK_IO_RES_OK;
+ }
break;
default:
goto out;
@@ -1776,25 +2450,25 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
- __func__, cmd_op, tag, ret, io->flags);
- return -EIOCBQUEUED;
+ __func__, cmd_op, tag, ret, io ? io->flags : 0);
+ return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
{
struct request *req;
- if (!ublk_need_req_ref(ubq))
- return NULL;
-
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ /*
+ * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
+ * which would overwrite it with io->cmd
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
if (!req)
return NULL;
- if (!ublk_get_req_ref(ubq, req))
+ if (!ublk_get_req_ref(io))
return NULL;
if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
@@ -1808,34 +2482,18 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
return req;
fail_put:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return NULL;
}
-static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
- /*
- * Not necessary for async retry, but let's keep it simple and always
- * copy the values to avoid any potential reuse.
- */
- const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
- const struct ublksrv_io_cmd ub_cmd = {
- .q_id = READ_ONCE(ub_src->q_id),
- .tag = READ_ONCE(ub_src->tag),
- .result = READ_ONCE(ub_src->result),
- .addr = READ_ONCE(ub_src->addr)
- };
-
- WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
-
- return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
-}
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
+ int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
-static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- ublk_ch_uring_cmd_local(cmd, issue_flags);
+ if (ret != -EIOCBQUEUED)
+ io_uring_cmd_done(cmd, ret, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -1872,7 +2530,8 @@ static inline bool ublk_check_ubuf_dir(const struct request *req,
}
static struct request *ublk_check_and_get_req(struct kiocb *iocb,
- struct iov_iter *iter, size_t *off, int dir)
+ struct iov_iter *iter, size_t *off, int dir,
+ struct ublk_io **io)
{
struct ublk_device *ub = iocb->ki_filp->private_data;
struct ublk_queue *ubq;
@@ -1880,9 +2539,6 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
size_t buf_off;
u16 tag, q_id;
- if (!ub)
- return ERR_PTR(-EACCES);
-
if (!user_backed_iter(iter))
return ERR_PTR(-EACCES);
@@ -1897,61 +2553,57 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
- if (!ubq)
- return ERR_PTR(-EINVAL);
+ if (!ublk_dev_support_user_copy(ub))
+ return ERR_PTR(-EACCES);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
return ERR_PTR(-EINVAL);
- req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
+ *io = &ubq->ios[tag];
+ req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
- if (!req->mq_hctx || !req->mq_hctx->driver_data)
- goto fail;
-
if (!ublk_check_ubuf_dir(req, dir))
goto fail;
*off = buf_off;
return req;
fail:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(*io, req);
return ERR_PTR(-EACCES);
}
static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
+ req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
+ req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
@@ -1960,7 +2612,6 @@ static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
.release = ublk_ch_release,
- .llseek = no_llseek,
.read_iter = ublk_ch_read_iter,
.write_iter = ublk_ch_write_iter,
.uring_cmd = ublk_ch_uring_cmd,
@@ -1969,64 +2620,94 @@ static const struct file_operations ublk_ch_fops = {
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
- int size = ublk_queue_cmd_buf_size(ub, q_id);
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ struct ublk_queue *ubq = ub->queues[q_id];
+ int size, i;
+
+ if (!ubq)
+ return;
+
+ size = ublk_queue_cmd_buf_size(ub);
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+ if (io->task)
+ put_task_struct(io->task);
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
+ }
- if (ubq->ubq_daemon)
- put_task_struct(ubq->ubq_daemon);
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+
+ kvfree(ubq);
+ ub->queues[q_id] = NULL;
+}
+
+static int ublk_get_queue_numa_node(struct ublk_device *ub, int q_id)
+{
+ unsigned int cpu;
+
+ /* Find first CPU mapped to this queue */
+ for_each_possible_cpu(cpu) {
+ if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
+ return cpu_to_node(cpu);
+ }
+
+ return NUMA_NO_NODE;
}
static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ int depth = ub->dev_info.queue_depth;
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
- void *ptr;
+ struct ublk_queue *ubq;
+ struct page *page;
+ int numa_node;
int size;
+ /* Determine NUMA node based on queue's CPU affinity */
+ numa_node = ublk_get_queue_numa_node(ub, q_id);
+
+ /* Allocate queue structure on local NUMA node */
+ ubq = kvzalloc_node(struct_size(ubq, ios, depth), GFP_KERNEL,
+ numa_node);
+ if (!ubq)
+ return -ENOMEM;
+
spin_lock_init(&ubq->cancel_lock);
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
- ubq->q_depth = ub->dev_info.queue_depth;
- size = ublk_queue_cmd_buf_size(ub, q_id);
+ ubq->q_depth = depth;
+ size = ublk_queue_cmd_buf_size(ub);
- ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
- if (!ptr)
+ /* Allocate I/O command buffer on local NUMA node */
+ page = alloc_pages_node(numa_node, gfp_flags, get_order(size));
+ if (!page) {
+ kvfree(ubq);
return -ENOMEM;
+ }
+ ubq->io_cmd_buf = page_address(page);
- ubq->io_cmd_buf = ptr;
+ ub->queues[q_id] = ubq;
ubq->dev = ub;
return 0;
}
static void ublk_deinit_queues(struct ublk_device *ub)
{
- int nr_queues = ub->dev_info.nr_hw_queues;
int i;
- if (!ub->__queues)
- return;
-
- for (i = 0; i < nr_queues; i++)
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_deinit_queue(ub, i);
- kfree(ub->__queues);
}
static int ublk_init_queues(struct ublk_device *ub)
{
- int nr_queues = ub->dev_info.nr_hw_queues;
- int depth = ub->dev_info.queue_depth;
- int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
- int i, ret = -ENOMEM;
+ int i, ret;
- ub->queue_size = ubq_size;
- ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
- if (!ub->__queues)
- return ret;
-
- for (i = 0; i < nr_queues; i++) {
- if (ublk_init_queue(ub, i))
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ret = ublk_init_queue(ub, i);
+ if (ret)
goto fail;
}
@@ -2077,6 +2758,7 @@ static void ublk_cdev_rel(struct device *dev)
ublk_deinit_queues(ub);
ublk_free_dev_number(ub);
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
}
@@ -2101,21 +2783,14 @@ static int ublk_add_chdev(struct ublk_device *ub)
if (ret)
goto fail;
- ublks_added++;
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ unprivileged_ublks_added++;
return 0;
fail:
put_device(dev);
return ret;
}
-static void ublk_stop_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, stop_work);
-
- ublk_stop_dev(ub);
-}
-
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
@@ -2131,20 +2806,21 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
- ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
- ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
static void ublk_remove(struct ublk_device *ub)
{
+ bool unprivileged;
+
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
- ublks_added--;
+
+ if (unprivileged)
+ unprivileged_ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -2163,9 +2839,9 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
-static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
const struct ublk_param_basic *p = &ub->params.basic;
int ublksrv_pid = (int)header->data[0];
struct queue_limits lim = {
@@ -2178,6 +2854,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
.virt_boundary_mask = p->virt_boundary_mask,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
+ .dma_alignment = 3,
};
struct gendisk *disk;
int ret = -EINVAL;
@@ -2203,15 +2880,36 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
return -EOPNOTSUPP;
- lim.zoned = true;
+ lim.features |= BLK_FEAT_ZONED;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
- lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
+ }
+
+ if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (ub->params.basic.attrs & UBLK_ATTR_FUA)
+ lim.features |= BLK_FEAT_FUA;
+ }
+
+ if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
+ lim.dma_alignment = ub->params.dma.alignment;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask;
+ lim.max_segment_size = ub->params.seg.max_segment_size;
+ lim.max_segments = ub->params.seg.max_segments;
}
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
+
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
test_bit(UB_STATE_USED, &ub->state)) {
@@ -2233,8 +2931,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
ublk_apply_params(ub);
- /* don't probe partitions if any one ubq daemon is un-trusted */
- if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ /* don't probe partitions if any daemon task is untrusted */
+ if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
@@ -2254,7 +2952,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
out_put_cdev:
if (ret) {
- ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ublk_detach_disk(ub);
ublk_put_device(ub);
}
if (ret)
@@ -2265,9 +2963,8 @@ out_unlock:
}
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
@@ -2316,9 +3013,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
info->nr_hw_queues, info->queue_depth);
}
-static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@@ -2335,11 +3031,33 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
+ if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
+ info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
+ return -EINVAL;
+
if (capable(CAP_SYS_ADMIN))
info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
+ /* forbid nonsense combinations of recovery flags */
+ switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
+ case 0:
+ case UBLK_F_USER_RECOVERY:
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
+ break;
+ default:
+ pr_warn("%s: invalid recovery flags %llx\n", __func__,
+ info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
+ return -EINVAL;
+ }
+
+ if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) {
+ pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n");
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2348,10 +3066,22 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
* TODO: provide forward progress for RECOVERY handler, so that
* unprivileged device can benefit from it
*/
- if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
UBLK_F_USER_RECOVERY);
+ /*
+ * For USER_COPY, we depend on userspace to fill the request
+ * buffer by pwrite() to the ublk char device, which can't be
+ * used for an unprivileged device
+ *
+ * The same applies to zero copy and auto buffer registration.
+ */
+ if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
+ return -EINVAL;
+ }
+
/* the created device is always owned by current user */
ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
@@ -2374,17 +3104,17 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
return ret;
ret = -EACCES;
- if (ublks_added >= ublks_max)
+ if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
+ unprivileged_ublks_added >= unprivileged_ublks_max)
goto out_unlock;
ret = -ENOMEM;
- ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+ ub = kzalloc(struct_size(ub, queues, info.nr_hw_queues), GFP_KERNEL);
if (!ub)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
- INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ mutex_init(&ub->cancel_mutex);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2404,37 +3134,42 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
ub->dev_info.flags &= UBLK_F_ALL;
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
- UBLK_F_URING_CMD_COMP_IN_TASK;
+ UBLK_F_URING_CMD_COMP_IN_TASK |
+ UBLK_F_PER_IO_DAEMON |
+ UBLK_F_BUF_REG_OFF_DAEMON;
- /* GET_DATA isn't needed any more with USER_COPY */
- if (ublk_dev_is_user_copy(ub))
+ /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
+ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
- /* Zoned storage support requires user copy feature */
+ /*
+ * Zoned storage support requires reusing `ublksrv_io_cmd->addr` for
+ * returning the zone_append_lba, which is only allowed in case of
+ * user copy or zero copy
+ */
if (ublk_dev_is_zoned(ub) &&
- (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
+ (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
+ (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
ret = -EINVAL;
goto out_free_dev_number;
}
- /* We are not ready to support zero copy */
- ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
-
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
- ret = ublk_init_queues(ub);
+ ret = ublk_add_tag_set(ub);
if (ret)
goto out_free_dev_number;
- ret = ublk_add_tag_set(ub);
+ ret = ublk_init_queues(ub);
if (ret)
- goto out_deinit_queues;
+ goto out_free_tag_set;
ret = -EFAULT;
if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
- goto out_free_tag_set;
+ goto out_deinit_queues;
/*
* Add the char dev so that ublksrv daemon can be setup.
@@ -2443,14 +3178,15 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
ret = ublk_add_chdev(ub);
goto out_unlock;
-out_free_tag_set:
- blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
ublk_deinit_queues(ub);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ub->tag_set);
out_free_dev_number:
ublk_free_dev_number(ub);
out_free_ub:
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
out_unlock:
mutex_unlock(&ublk_ctl_mutex);
@@ -2519,16 +3255,12 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
-
return 0;
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@ -2557,9 +3289,8 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
}
static int ublk_ctrl_get_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
@@ -2588,9 +3319,8 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
}
static int ublk_ctrl_set_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
@@ -2607,9 +3337,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- /* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ if (test_bit(UB_STATE_USED, &ub->state)) {
+ /*
+ * Parameters can only be changed when device hasn't
+ * been started yet
+ */
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
@@ -2625,40 +3358,13 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
return ret;
}
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
- int i;
-
- WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
- /* old daemon is PF_EXITING, put it now */
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
- ubq->canceling = false;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- /* forget everything now and be ready for new FETCH_REQ */
- io->flags = 0;
- io->cmd = NULL;
- io->addr = 0;
- }
-}
-
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
- int i;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
/*
 * START_RECOVERY is only allowed after:
@@ -2667,24 +3373,22 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
* and related io_uring ctx is freed so file struct of /dev/ublkcX is
* released.
*
+ * and one of the following holds
+ *
* (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
 * (a) has quiesced the request queue
 * (b) has requeued every inflight rq whose io_flags is ACTIVE
 * (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
 * (d) has completed/canceled all ioucmds owned by the dying process
+ *
+ * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
+ * quiesced, but all I/O is being immediately errored
*/
- if (test_bit(UB_STATE_OPEN, &ub->state) ||
- ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
- ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
@@ -2693,48 +3397,46 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
}
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
- pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
- /* wait until new ubq_daemon sending all FETCH_REQ */
+ pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__,
+ header->dev_id);
+
if (wait_for_completion_interruptible(&ub->completion))
return -EINTR;
- pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
+ header->dev_id);
+
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (!ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
blk_mq_kick_requeue_list(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
- u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
+ u64 features = UBLK_F_ALL;
if (header->len != UBLK_FEATURES_LEN || !header->addr)
return -EINVAL;
@@ -2745,6 +3447,125 @@ static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
return 0;
}
+static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+{
+ struct ublk_param_basic *p = &ub->params.basic;
+ u64 new_size = header->data[0];
+
+ mutex_lock(&ub->mutex);
+ p->dev_sectors = new_size;
+ set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
+ mutex_unlock(&ub->mutex);
+}
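
UBLK_U_CMD_UPDATE_SIZE is the control-plane entry for this helper; the new capacity arrives in header->data[0], counted in 512-byte sectors. A sketch of issuing it through /dev/ublk-control, under the same liburing assumptions (ctrl_fd, ring and dev_id are set up elsewhere):

    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    struct ublksrv_ctrl_cmd *cmd;

    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_URING_CMD;
    sqe->fd = ctrl_fd;                  /* /dev/ublk-control */
    sqe->cmd_op = UBLK_U_CMD_UPDATE_SIZE;
    cmd = (struct ublksrv_ctrl_cmd *)sqe->cmd;
    cmd->dev_id = dev_id;
    cmd->data[0] = new_sectors;         /* becomes header->data[0] above */
    io_uring_submit(ring);

set_capacity_and_notify() emits a uevent when the capacity actually changes, so userspace sees the resize without reopening the device.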
+
+struct count_busy {
+ const struct ublk_queue *ubq;
+ unsigned int nr_busy;
+};
+
+static bool ublk_count_busy_req(struct request *rq, void *data)
+{
+ struct count_busy *idle = data;
+
+ if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
+ idle->nr_busy += 1;
+ return true;
+}
+
+/* uring_cmd is guaranteed to be active if the associated request is idle */
+static bool ubq_has_idle_io(const struct ublk_queue *ubq)
+{
+ struct count_busy data = {
+ .ubq = ubq,
+ };
+
+ blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data);
+ return data.nr_busy < ubq->q_depth;
+}
+
+/* Wait until each hw queue has at least one idle IO */
+static int ublk_wait_for_idle_io(struct ublk_device *ub,
+ unsigned int timeout_ms)
+{
+ unsigned int elapsed = 0;
+ int ret;
+
+ while (elapsed < timeout_ms && !signal_pending(current)) {
+ unsigned int queues_cancelable = 0;
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ queues_cancelable += !!ubq_has_idle_io(ubq);
+ }
+
+ /*
+ * Each queue needs at least one active command for
+ * notifying ublk server
+ */
+ if (queues_cancelable == ub->dev_info.nr_hw_queues)
+ break;
+
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ elapsed += UBLK_REQUEUE_DELAY_MS;
+ }
+
+ if (signal_pending(current))
+ ret = -EINTR;
+ else if (elapsed >= timeout_ms)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
+{
+ /* zero means wait forever */
+ u64 timeout_ms = header->data[0];
+ struct gendisk *disk;
+ int ret = -ENODEV;
+
+ if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ub->mutex);
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto unlock;
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto put_disk;
+
+ ret = 0;
+ /* already in expected state */
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto put_disk;
+
+ /* Mark the device as canceling */
+ mutex_lock(&ub->cancel_mutex);
+ blk_mq_quiesce_queue(disk->queue);
+ ublk_set_canceling(ub, true);
+ blk_mq_unquiesce_queue(disk->queue);
+ mutex_unlock(&ub->cancel_mutex);
+
+ if (!timeout_ms)
+ timeout_ms = UINT_MAX;
+ ret = ublk_wait_for_idle_io(ub, timeout_ms);
+
+put_disk:
+ ublk_put_disk(disk);
+unlock:
+ mutex_unlock(&ub->mutex);
+
+ /* Cancel pending uring_cmd */
+ if (!ret)
+ ublk_cancel_dev(ub);
+ return ret;
+}
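
UBLK_U_CMD_QUIESCE_DEV takes the same shape, with data[0] carrying the wait timeout in milliseconds and zero meaning wait forever; a sketch reusing the sqe and cmd variables from the UPDATE_SIZE example:

    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_URING_CMD;
    sqe->fd = ctrl_fd;                  /* /dev/ublk-control */
    sqe->cmd_op = UBLK_U_CMD_QUIESCE_DEV;
    cmd = (struct ublksrv_ctrl_cmd *)sqe->cmd;
    cmd->dev_id = dev_id;
    cmd->data[0] = 10000;               /* wait up to 10s for idle IOs; 0 = forever */
    io_uring_submit(ring);

Once this returns 0, pending uring_cmds have been canceled and the server can exit cleanly, to be brought back later with the usual START_USER_RECOVERY/END_USER_RECOVERY sequence.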
+
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
@@ -2830,6 +3651,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
case UBLK_CMD_SET_PARAMS:
case UBLK_CMD_START_USER_RECOVERY:
case UBLK_CMD_END_USER_RECOVERY:
+ case UBLK_CMD_UPDATE_SIZE:
+ case UBLK_CMD_QUIESCE_DEV:
mask = MAY_READ | MAY_WRITE;
break;
default:
@@ -2871,7 +3694,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
goto out;
if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
- ret = ublk_ctrl_get_features(cmd);
+ ret = ublk_ctrl_get_features(header);
goto out;
}
@@ -2888,41 +3711,48 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_CMD_START_DEV:
- ret = ublk_ctrl_start_dev(ub, cmd);
+ ret = ublk_ctrl_start_dev(ub, header);
break;
case UBLK_CMD_STOP_DEV:
ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
case UBLK_CMD_GET_DEV_INFO2:
- ret = ublk_ctrl_get_dev_info(ub, cmd);
+ ret = ublk_ctrl_get_dev_info(ub, header);
break;
case UBLK_CMD_ADD_DEV:
- ret = ublk_ctrl_add_dev(cmd);
+ ret = ublk_ctrl_add_dev(header);
break;
case UBLK_CMD_DEL_DEV:
ret = ublk_ctrl_del_dev(&ub, true);
break;
- case UBLK_U_CMD_DEL_DEV_ASYNC:
+ case UBLK_CMD_DEL_DEV_ASYNC:
ret = ublk_ctrl_del_dev(&ub, false);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
- ret = ublk_ctrl_get_queue_affinity(ub, cmd);
+ ret = ublk_ctrl_get_queue_affinity(ub, header);
break;
case UBLK_CMD_GET_PARAMS:
- ret = ublk_ctrl_get_params(ub, cmd);
+ ret = ublk_ctrl_get_params(ub, header);
break;
case UBLK_CMD_SET_PARAMS:
- ret = ublk_ctrl_set_params(ub, cmd);
+ ret = ublk_ctrl_set_params(ub, header);
break;
case UBLK_CMD_START_USER_RECOVERY:
- ret = ublk_ctrl_start_recovery(ub, cmd);
+ ret = ublk_ctrl_start_recovery(ub, header);
break;
case UBLK_CMD_END_USER_RECOVERY:
- ret = ublk_ctrl_end_recovery(ub, cmd);
+ ret = ublk_ctrl_end_recovery(ub, header);
+ break;
+ case UBLK_CMD_UPDATE_SIZE:
+ ublk_ctrl_set_size(ub, header);
+ ret = 0;
+ break;
+ case UBLK_CMD_QUIESCE_DEV:
+ ret = ublk_ctrl_quiesce_dev(ub, header);
break;
default:
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
}
@@ -2930,10 +3760,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
- return -EIOCBQUEUED;
+ return ret;
}
static const struct file_operations ublk_ctl_fops = {
@@ -2955,6 +3784,7 @@ static int __init ublk_init(void)
BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+ BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
init_waitqueue_head(&ublk_idr_wq);
@@ -2997,23 +3827,27 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
+static int ublk_set_max_unprivileged_ublks(const char *buf,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}
-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
+static int ublk_get_max_unprivileged_ublks(char *buf,
+ const struct kernel_param *kp)
{
- return sysfs_emit(buf, "%u\n", ublks_max);
+ return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
}
-static const struct kernel_param_ops ublk_max_ublks_ops = {
- .set = ublk_set_max_ublks,
- .get = ublk_get_max_ublks,
+static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
+ .set = ublk_set_max_unprivileged_ublks,
+ .get = ublk_get_max_unprivileged_ublks,
};
-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
-MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
+module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
+ &unprivileged_ublks_max, 0644);
+MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add (default: 64)");
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
+MODULE_DESCRIPTION("Userspace block device");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c1af0a7d56c8..357434bdae99 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -227,7 +226,7 @@ static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
if (unlikely(err))
return -ENOMEM;
- return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+ return blk_rq_map_sg(req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
@@ -471,18 +470,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist)
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct rq_list *rqlist)
{
+ struct request *req;
unsigned long flags;
- int err;
bool kick;
spin_lock_irqsave(&vq->lock, flags);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ int err;
err = virtblk_add_req(vq->vq, vbr);
if (err) {
@@ -495,37 +494,32 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
- return kick;
+ if (kick)
+ virtqueue_notify(vq->vq);
}
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
- bool kick;
-
- if (!virtblk_prep_rq_batch(req)) {
- rq_list_move(rqlist, &requeue_list, req, prev);
- req = prev;
- if (!req)
- continue;
- }
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct virtio_blk_vq *vq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
- if (!next || req->mq_hctx != next->mq_hctx) {
- req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist);
- if (kick)
- virtqueue_notify(vq->vq);
+ if (vq && vq != this_vq)
+ virtblk_add_req_batch(vq, &submit_list);
+ vq = this_vq;
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (virtblk_prep_rq_batch(req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (vq)
+ virtblk_add_req_batch(vq, &submit_list);
*rqlist = requeue_list;
}
@@ -577,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
- err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+ err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL);
if (err)
goto out;
@@ -590,7 +584,8 @@ out:
static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
- unsigned int idx, report_zones_cb cb, void *data)
+ unsigned int idx,
+ struct blk_report_zones_args *args)
{
struct blk_zone zone = { };
@@ -656,12 +651,12 @@ static int virtblk_parse_zone(struct virtio_blk *vblk,
* The callback below checks the validity of the reported
* entry data, no need to further validate it here.
*/
- return cb(&zone, idx, data);
+ return disk_report_zone(vblk->disk, &zone, idx, args);
}
static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb,
- void *data)
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args)
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
@@ -699,7 +694,7 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
- zone_idx, cb, data);
+ zone_idx, args);
if (ret)
goto fail_report;
@@ -728,7 +723,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- lim->zoned = true;
+ lim->features |= BLK_FEAT_ZONED;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
@@ -784,7 +779,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
wg, v);
return -ENODEV;
}
- lim->max_zone_append_sectors = v;
+ lim->max_hw_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
return 0;
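
Both zoned knobs move into queue_limits here: the zoned flag becomes a feature bit, and the append limit is now explicitly the hardware limit. A two-line sketch of the new style, with v standing for the device-reported value as above:

	lim->features |= BLK_FEAT_ZONED;	/* replaces lim->zoned = true */
	lim->max_hw_zone_append_sectors = v;	/* hw limit; the block layer derives the effective one */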
@@ -823,7 +818,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
@@ -835,9 +830,9 @@ out:
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
-static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int virtblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
+ struct virtio_blk *vblk = disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
@@ -859,7 +854,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
@@ -964,8 +959,7 @@ static int init_vq(struct virtio_blk *vblk)
{
int err;
unsigned short i;
- vq_callback_t **callbacks;
- const char **names;
+ struct virtqueue_info *vqs_info;
struct virtqueue **vqs;
unsigned short num_vqs;
unsigned short num_poll_vqs;
@@ -983,9 +977,8 @@ static int init_vq(struct virtio_blk *vblk)
return -EINVAL;
}
- num_vqs = min_t(unsigned int,
- min_not_zero(num_request_queues, nr_cpu_ids),
- num_vqs);
+ num_vqs = blk_mq_num_possible_queues(
+ min_not_zero(num_request_queues, num_vqs));
num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
@@ -1002,28 +995,26 @@ static int init_vq(struct virtio_blk *vblk)
if (!vblk->vqs)
return -ENOMEM;
- names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
- callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+ vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);
vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
- if (!names || !callbacks || !vqs) {
+ if (!vqs_info || !vqs) {
err = -ENOMEM;
goto out;
}
for (i = 0; i < num_vqs - num_poll_vqs; i++) {
- callbacks[i] = virtblk_done;
+ vqs_info[i].callback = virtblk_done;
snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
- names[i] = vblk->vqs[i].name;
+ vqs_info[i].name = vblk->vqs[i].name;
}
for (; i < num_vqs; i++) {
- callbacks[i] = NULL;
snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
- names[i] = vblk->vqs[i].name;
+ vqs_info[i].name = vblk->vqs[i].name;
}
/* Discover virtqueues and write information to configuration. */
- err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
+ err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
if (err)
goto out;
@@ -1035,10 +1026,14 @@ static int init_vq(struct virtio_blk *vblk)
out:
kfree(vqs);
- kfree(callbacks);
- kfree(names);
- if (err)
+ kfree(vqs_info);
+ if (err) {
kfree(vblk->vqs);
+ /*
+ * Set to NULL to prevent freeing vqs again during freezing.
+ */
+ vblk->vqs = NULL;
+ }
return err;
}
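
The parallel callbacks[]/names[] arrays are folded into a single array of struct virtqueue_info that virtio_find_vqs() consumes directly. A minimal sketch, assuming one IRQ-driven queue and one polled queue; a NULL .callback marks a polled virtqueue, exactly as in the loops above:

	struct virtqueue_info vqs_info[2] = {
		{ .name = "req.0",	.callback = virtblk_done },
		{ .name = "req_poll.0",	.callback = NULL },	/* polled */
	};
	struct virtqueue *vqs[2];

	err = virtio_find_vqs(vdev, 2, vqs, vqs_info, &desc);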
@@ -1089,14 +1084,6 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
return writeback;
}
-static void virtblk_update_cache_mode(struct virtio_device *vdev)
-{
- u8 writeback = virtblk_get_cache_mode(vdev);
- struct virtio_blk *vblk = vdev->priv;
-
- blk_queue_write_cache(vblk->disk->queue, writeback, false);
-}
-
static const char *const virtblk_cache_types[] = {
"write through", "write back"
};
@@ -1108,6 +1095,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
+ struct queue_limits lim;
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
@@ -1116,7 +1104,15 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
- virtblk_update_cache_mode(vdev);
+
+ lim = queue_limits_start_update(disk->queue);
+ if (virtblk_get_cache_mode(vdev))
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ else
+ lim.features &= ~BLK_FEAT_WRITE_CACHE;
+ i = queue_limits_commit_update_frozen(disk->queue, &lim);
+ if (i)
+ return i;
return count;
}
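
The removed virtblk_update_cache_mode() wrapper around blk_queue_write_cache() gives way to an atomic queue_limits update; the _frozen variant freezes the queue around the commit. The general shape, as a sketch for toggling any feature bit:

	struct queue_limits lim;
	int err;

	lim = queue_limits_start_update(q);
	if (enable)
		lim.features |= BLK_FEAT_WRITE_CACHE;
	else
		lim.features &= ~BLK_FEAT_WRITE_CACHE;
	err = queue_limits_commit_update_frozen(q, &lim);	/* 0 on success */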
@@ -1187,7 +1183,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_map_hw_queues(&set->map[i],
+ &vblk->vdev->dev, 0);
}
}
@@ -1215,11 +1212,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
+ u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
+ !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+ virtblk_complete_batch))
virtblk_request_done(req);
}
@@ -1247,7 +1245,7 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
struct queue_limits *lim)
{
struct virtio_device *vdev = vblk->vdev;
- u32 v, blk_size, max_size, sg_elems, opt_io_size;
+ u32 v, max_size, sg_elems, opt_io_size;
u32 max_discard_segs = 0;
u32 discard_granularity = 0;
u16 min_io_size;
@@ -1286,46 +1284,36 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
lim->max_segment_size = max_size;
/* Host can optionally specify the block size of the device */
- err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+ virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
- &blk_size);
- if (!err) {
- err = blk_validate_block_size(blk_size);
- if (err) {
- dev_err(&vdev->dev,
- "virtio_blk: invalid block size: 0x%x\n",
- blk_size);
- return err;
- }
-
- lim->logical_block_size = blk_size;
- } else
- blk_size = lim->logical_block_size;
+ &lim->logical_block_size);
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
&physical_block_exp);
if (!err && physical_block_exp)
- lim->physical_block_size = blk_size * (1 << physical_block_exp);
+ lim->physical_block_size =
+ lim->logical_block_size * (1 << physical_block_exp);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, alignment_offset,
&alignment_offset);
if (!err && alignment_offset)
- lim->alignment_offset = blk_size * alignment_offset;
+ lim->alignment_offset =
+ lim->logical_block_size * alignment_offset;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, min_io_size,
&min_io_size);
if (!err && min_io_size)
- lim->io_min = blk_size * min_io_size;
+ lim->io_min = lim->logical_block_size * min_io_size;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, opt_io_size,
&opt_io_size);
if (!err && opt_io_size)
- lim->io_opt = blk_size * opt_io_size;
+ lim->io_opt = lim->logical_block_size * opt_io_size;
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
@@ -1419,7 +1407,7 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
lim->discard_granularity =
discard_granularity << SECTOR_SHIFT;
else
- lim->discard_granularity = blk_size;
+ lim->discard_granularity = lim->logical_block_size;
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
@@ -1448,7 +1436,10 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
- struct queue_limits lim = { };
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ .logical_block_size = SECTOR_SIZE,
+ };
int err, index;
unsigned int queue_depth;
@@ -1494,7 +1485,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
@@ -1512,6 +1502,9 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_free_tags;
+ if (virtblk_get_cache_mode(vdev))
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+
vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
if (IS_ERR(vblk->disk)) {
err = PTR_ERR(vblk->disk);
@@ -1527,9 +1520,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->fops = &virtblk_fops;
vblk->index = index;
- /* configure queue flush support */
- virtblk_update_cache_mode(vdev);
-
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
set_disk_ro(vblk->disk, 1);
@@ -1541,8 +1531,8 @@ static int virtblk_probe(struct virtio_device *vdev)
* All steps that follow use the VQs therefore they need to be
* placed after the virtio_device_ready() call above.
*/
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (lim.features & BLK_FEAT_ZONED)) {
err = blk_revalidate_disk_zones(vblk->disk);
if (err)
goto out_cleanup_disk;
@@ -1595,13 +1585,16 @@ static void virtblk_remove(struct virtio_device *vdev)
put_disk(vblk->disk);
}
-#ifdef CONFIG_PM_SLEEP
-static int virtblk_freeze(struct virtio_device *vdev)
+static int virtblk_freeze_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ struct request_queue *q = vblk->disk->queue;
+ unsigned int memflags;
/* Ensure no requests in virtqueues before deleting vqs. */
- blk_mq_freeze_queue(vblk->disk->queue);
+ memflags = blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue_nowait(q);
+ blk_mq_unfreeze_queue(q, memflags);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
@@ -1611,11 +1604,17 @@ static int virtblk_freeze(struct virtio_device *vdev)
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
+ /*
+ * Set to NULL to prevent freeing vqs again after a failed vqs
+ * allocation during resume. Note that kfree() already handles NULL
+ * pointers safely.
+ */
+ vblk->vqs = NULL;
return 0;
}
-static int virtblk_restore(struct virtio_device *vdev)
+static int virtblk_restore_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int ret;
@@ -1625,12 +1624,33 @@ static int virtblk_restore(struct virtio_device *vdev)
return ret;
virtio_device_ready(vdev);
+ blk_mq_unquiesce_queue(vblk->disk->queue);
- blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
#endif
+static int virtblk_reset_prepare(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_reset_done(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1658,7 +1678,6 @@ static struct virtio_driver virtio_blk = {
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtblk_probe,
.remove = virtblk_remove,
@@ -1667,13 +1686,15 @@ static struct virtio_driver virtio_blk = {
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
+ .reset_prepare = virtblk_reset_prepare,
+ .reset_done = virtblk_reset_done,
};
static int __init virtio_blk_init(void)
{
int error;
- virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
if (!virtblk_wq)
return -ENOMEM;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 944576d582fb..a7c2b04ab943 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
- ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+ ring->st_print = jiffies + secs_to_jiffies(10);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
@@ -1563,5 +1563,6 @@ static void __exit xen_blkif_fini(void)
module_exit(xen_blkif_fini);
+MODULE_DESCRIPTION("Virtual block device back-end driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index fd7c0ff2139c..04fc6b552c04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -493,11 +493,11 @@ static void blkif_restart_queue_callback(void *arg)
schedule_work(&rinfo->work);
}
-static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct gendisk *disk, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
- sector_t nsect = get_capacity(bd->bd_disk);
+ sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
@@ -751,7 +751,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
id = blkif_ring_get_request(rinfo, req, &final_ring_req);
ring_req = &rinfo->shadow[id].req;
- num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+ num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg);
num_grant = 0;
/* Calculate the number of grant used */
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
@@ -788,6 +788,11 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* A barrier request a superset of FUA, so we can
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
+ *
+ * Note that we can end up here with a FUA write and the
+ * flags cleared. This happens when the flag was
+ * run-time disabled after a failing I/O, and we'll
+ * simply submit it as a normal write.
*/
if (info->feature_flush && info->feature_fua)
ring_req->operation =
@@ -795,8 +800,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
- else
- ring_req->operation = 0;
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
@@ -887,16 +890,6 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
notify_remote_via_irq(rinfo->irq);
}
-static inline bool blkif_request_flush_invalid(struct request *req,
- struct blkfront_info *info)
-{
- return (blk_rq_is_passthrough(req) ||
- ((req_op(req) == REQ_OP_FLUSH) &&
- !info->feature_flush) ||
- ((req->cmd_flags & REQ_FUA) &&
- !info->feature_fua));
-}
-
static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
@@ -908,12 +901,22 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
- if (RING_FULL(&rinfo->ring))
- goto out_busy;
- if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
- goto out_err;
+ /*
+ * Check if the backend actually supports flushes.
+ *
+ * While the block layer won't send us flushes if we don't claim to
+ * support them, the Xen protocol allows the backend to revoke support
+ * at any time. That is of course a really bad idea and dangerous, but
+ * has been allowed for 10+ years. In that case we simply clear the
+ * flags, and directly return here for an empty flush and ignore the
+ * FUA flag later on.
+ */
+ if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush))
+ goto complete;
+ if (RING_FULL(&rinfo->ring))
+ goto out_busy;
if (blkif_queue_request(qd->rq, rinfo))
goto out_busy;
@@ -921,14 +924,14 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_OK;
-out_err:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
- return BLK_STS_IOERR;
-
out_busy:
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_DEV_RESOURCE;
+complete:
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ blk_mq_end_request(qd->rq, BLK_STS_OK);
+ return BLK_STS_OK;
}
static void blkif_complete_rq(struct request *rq)
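
This is the replacement for the removed blkif_request_flush_invalid() check: a flush whose backend support was revoked is not failed but completed as a successful no-op straight from ->queue_rq(). The same pattern in isolation (locking elided; feature_flush is the driver's cached capability, as above):

	if (unlikely(req_op(rq) == REQ_OP_FLUSH && !info->feature_flush)) {
		blk_mq_end_request(rq, BLK_STS_OK);	/* nothing to flush */
		return BLK_STS_OK;
	}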
@@ -956,6 +959,12 @@ static void blkif_set_queue_limits(const struct blkfront_info *info,
lim->max_secure_erase_sectors = UINT_MAX;
}
+ if (info->feature_flush) {
+ lim->features |= BLK_FEAT_WRITE_CACHE;
+ if (info->feature_fua)
+ lim->features |= BLK_FEAT_FUA;
+ }
+
/* Hard sector size and max sectors impersonate the equiv. hardware. */
lim->logical_block_size = info->sector_size;
lim->physical_block_size = info->physical_sector_size;
@@ -984,8 +993,6 @@ static const char *flush_info(struct blkfront_info *info)
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
- info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
@@ -1063,8 +1070,7 @@ static char *encode_disk_name(char *ptr, unsigned int n)
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
- struct blkfront_info *info, u16 sector_size,
- unsigned int physical_sector_size)
+ struct blkfront_info *info)
{
struct queue_limits lim = {};
struct gendisk *gd;
@@ -1125,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
@@ -1139,7 +1144,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
err = PTR_ERR(gd);
goto out_free_tag_set;
}
- blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1159,8 +1163,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
info->rq = gd->queue;
info->gd = gd;
- info->sector_size = sector_size;
- info->physical_sector_size = physical_sector_size;
xlvbd_flush(info);
@@ -1605,8 +1607,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
- blk_queue_max_discard_sectors(rq, 0);
- blk_queue_max_secure_erase_sectors(rq, 0);
+ blk_queue_disable_discard(rq);
+ blk_queue_disable_secure_erase(rq);
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:
@@ -1627,7 +1629,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_OK;
info->feature_fua = 0;
info->feature_flush = 0;
- xlvbd_flush(info);
}
fallthrough;
case BLKIF_OP_READ:
@@ -2315,8 +2316,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
static void blkfront_connect(struct blkfront_info *info)
{
unsigned long long sectors;
- unsigned long sector_size;
- unsigned int physical_sector_size;
int err, i;
struct blkfront_ring_info *rinfo;
@@ -2355,7 +2354,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"sectors", "%llu", &sectors,
"info", "%u", &info->vdisk_info,
- "sector-size", "%lu", &sector_size,
+ "sector-size", "%lu", &info->sector_size,
NULL);
if (err) {
xenbus_dev_fatal(info->xbdev, err,
@@ -2369,9 +2368,9 @@ static void blkfront_connect(struct blkfront_info *info)
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
- physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+ info->physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
"physical-sector-size",
- sector_size);
+ info->sector_size);
blkfront_gather_backend_features(info);
for_each_rinfo(info, rinfo, i) {
err = blkfront_setup_indirect(rinfo);
@@ -2383,8 +2382,7 @@ static void blkfront_connect(struct blkfront_info *info)
}
}
- err = xlvbd_alloc_gendisk(sectors, info, sector_size,
- physical_sector_size);
+ err = xlvbd_alloc_gendisk(sectors, info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7c5f4e4d9b50..8c1c7f4211eb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -354,7 +354,6 @@ static int __init z2_init(void)
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
@@ -409,4 +408,5 @@ static void __exit z2_exit(void)
module_init(z2_init);
module_exit(z2_exit);
+MODULE_DESCRIPTION("Amiga Zorro II ramdisk driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
new file mode 100644
index 000000000000..77bd6081b244
--- /dev/null
+++ b/drivers/block/zloop.c
@@ -0,0 +1,1507 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Christoph Hellwig.
+ * Copyright (c) 2025, Western Digital Corporation or its affiliates.
+ *
+ * Zoned Loop Device driver - exports a zoned block device using one file per
+ * zone as backing storage.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blk-mq.h>
+#include <linux/blkzoned.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+
+/*
+ * Options for adding (and removing) a device.
+ */
+enum {
+ ZLOOP_OPT_ERR = 0,
+ ZLOOP_OPT_ID = (1 << 0),
+ ZLOOP_OPT_CAPACITY = (1 << 1),
+ ZLOOP_OPT_ZONE_SIZE = (1 << 2),
+ ZLOOP_OPT_ZONE_CAPACITY = (1 << 3),
+ ZLOOP_OPT_NR_CONV_ZONES = (1 << 4),
+ ZLOOP_OPT_BASE_DIR = (1 << 5),
+ ZLOOP_OPT_NR_QUEUES = (1 << 6),
+ ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
+ ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+ ZLOOP_OPT_ZONE_APPEND = (1 << 9),
+ ZLOOP_OPT_ORDERED_ZONE_APPEND = (1 << 10),
+};
+
+static const match_table_t zloop_opt_tokens = {
+ { ZLOOP_OPT_ID, "id=%d" },
+ { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" },
+ { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" },
+ { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" },
+ { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" },
+ { ZLOOP_OPT_BASE_DIR, "base_dir=%s" },
+ { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
+ { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
+ { ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ZONE_APPEND, "zone_append=%u" },
+ { ZLOOP_OPT_ORDERED_ZONE_APPEND, "ordered_zone_append" },
+ { ZLOOP_OPT_ERR, NULL }
+};
+
+/* Default values for the "add" operation. */
+#define ZLOOP_DEF_ID -1
+#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT)
+#define ZLOOP_DEF_NR_ZONES 64
+#define ZLOOP_DEF_NR_CONV_ZONES 8
+#define ZLOOP_DEF_BASE_DIR "/var/local/zloop"
+#define ZLOOP_DEF_NR_QUEUES 1
+#define ZLOOP_DEF_QUEUE_DEPTH 128
+#define ZLOOP_DEF_BUFFERED_IO false
+#define ZLOOP_DEF_ZONE_APPEND true
+#define ZLOOP_DEF_ORDERED_ZONE_APPEND false
+
+/* Arbitrary limit on the zone size (16GB). */
+#define ZLOOP_MAX_ZONE_SIZE_MB 16384
+
+struct zloop_options {
+ unsigned int mask;
+ int id;
+ sector_t capacity;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_conv_zones;
+ char *base_dir;
+ unsigned int nr_queues;
+ unsigned int queue_depth;
+ bool buffered_io;
+ bool zone_append;
+ bool ordered_zone_append;
+};
+
+/*
+ * Device states.
+ */
+enum {
+ Zlo_creating = 0,
+ Zlo_live,
+ Zlo_deleting,
+};
+
+enum zloop_zone_flags {
+ ZLOOP_ZONE_CONV = 0,
+ ZLOOP_ZONE_SEQ_ERROR,
+};
+
+struct zloop_zone {
+ struct file *file;
+
+ unsigned long flags;
+ struct mutex lock;
+ spinlock_t wp_lock;
+ enum blk_zone_cond cond;
+ sector_t start;
+ sector_t wp;
+
+ gfp_t old_gfp_mask;
+};
+
+struct zloop_device {
+ unsigned int id;
+ unsigned int state;
+
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *disk;
+
+ struct workqueue_struct *workqueue;
+ bool buffered_io;
+ bool zone_append;
+ bool ordered_zone_append;
+
+ const char *base_dir;
+ struct file *data_dir;
+
+ unsigned int zone_shift;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int block_size;
+
+ struct zloop_zone zones[] __counted_by(nr_zones);
+};
+
+struct zloop_cmd {
+ struct work_struct work;
+ atomic_t ref;
+ sector_t sector;
+ sector_t nr_sectors;
+ long ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+};
+
+static DEFINE_IDR(zloop_index_idr);
+static DEFINE_MUTEX(zloop_ctl_mutex);
+
+static unsigned int rq_zone_no(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ return blk_rq_pos(rq) >> zlo->zone_shift;
+}
+
+static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ struct kstat stat;
+ sector_t file_sectors;
+ unsigned long flags;
+ int ret;
+
+ lockdep_assert_held(&zone->lock);
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat (err=%d)\n",
+ zone_no, ret);
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ return ret;
+ }
+
+ file_sectors = stat.size >> SECTOR_SHIFT;
+ if (file_sectors > zlo->zone_capacity) {
+ pr_err("Zone %u file too large (%llu sectors > %llu)\n",
+ zone_no, file_sectors, zlo->zone_capacity);
+ return -EINVAL;
+ }
+
+ if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone %u file size not aligned to block size %u\n",
+ zone_no, zlo->block_size);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+ if (!file_sectors) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ } else if (file_sectors == zlo->zone_capacity) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ zone->wp = zone->start + file_sectors;
+ }
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+
+ return 0;
+}
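
The reconstruction above is a pure function of the backing-file size; a worked example, assuming a zone capacity of 524288 sectors (256 MiB):

	/* 0 sectors         -> BLK_ZONE_COND_EMPTY,  wp = zone->start          */
	/* 524288 sectors    -> BLK_ZONE_COND_FULL,   wp = ULLONG_MAX           */
	/* 1..524287 sectors -> BLK_ZONE_COND_CLOSED, wp = start + file_sectors */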
+
+static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_IMP_OPEN:
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ spin_lock_irqsave(&zone->wp_lock, flags);
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, 0)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_all_zones(struct zloop_device *zlo)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) {
+ ret = zloop_reset_zone(zlo, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_FULL)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static void zloop_put_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_rw_complete(struct kiocb *iocb, long ret)
+{
+ struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb);
+
+ cmd->ret = ret;
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+ bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+ int rw = is_write ? ITER_SOURCE : ITER_DEST;
+ struct req_iterator rq_iter;
+ struct zloop_zone *zone;
+ struct iov_iter iter;
+ struct bio_vec tmp;
+ unsigned long flags;
+ sector_t zone_end;
+ unsigned int nr_bvec;
+ int ret;
+
+ atomic_set(&cmd->ref, 2);
+ cmd->sector = sector;
+ cmd->nr_sectors = nr_sectors;
+ cmd->ret = 0;
+
+ if (WARN_ON_ONCE(is_append && !zlo->zone_append)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* We should never get an I/O beyond the device capacity. */
+ if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+ zone = &zlo->zones[zone_no];
+ zone_end = zone->start + zlo->zone_capacity;
+
+ /*
+ * The block layer should never send requests that are not fully
+ * contained within the zone.
+ */
+ if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+ if (ret)
+ goto out;
+ }
+
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+ mutex_lock(&zone->lock);
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+
+ /*
+ * Zone append operations always go at the current write
+ * pointer, but regular write operations must already be
+ * aligned to the write pointer when submitted.
+ */
+ if (is_append) {
+ /*
+ * If ordered zone append is in use, we already checked
+ * and set the target sector in zloop_queue_rq().
+ */
+ if (!zlo->ordered_zone_append) {
+ if (zone->cond == BLK_ZONE_COND_FULL ||
+ zone->wp + nr_sectors > zone_end) {
+ spin_unlock_irqrestore(&zone->wp_lock,
+ flags);
+ ret = -EIO;
+ goto unlock;
+ }
+ sector = zone->wp;
+ }
+ cmd->sector = sector;
+ } else if (sector != zone->wp) {
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+ zone_no, sector, zone->wp);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Implicitly open the target zone. */
+ if (zone->cond == BLK_ZONE_COND_CLOSED ||
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Advance the write pointer, unless ordered zone append is in
+ * use. If the write fails, the write pointer position will be
+ * corrected when the next I/O starts execution.
+ */
+ if (!is_append || !zlo->ordered_zone_append) {
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ }
+ }
+
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ }
+
+ nr_bvec = blk_rq_nr_bvec(rq);
+
+ if (rq->bio != rq->biotail) {
+ struct bio_vec *bvec;
+
+ cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO);
+ if (!cmd->bvec) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /*
+ * The bios of the request may be started from the middle of
+ * the 'bvec' because of bio splitting, so we can't directly
+ * copy bio->bi_io_vec to the new bvec. The rq_for_each_bvec
+ * API will take care of all details for us.
+ */
+ bvec = cmd->bvec;
+ rq_for_each_bvec(tmp, rq, rq_iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
+ } else {
+ /*
+ * Same here, this bio may be started from the middle of the
+ * 'bvec' because of bio splitting, so the offset into the
+ * bvec must be passed to the iov iterator.
+ */
+ iov_iter_bvec(&iter, rw,
+ __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
+ nr_bvec, blk_rq_bytes(rq));
+ iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
+ }
+
+ cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+ cmd->iocb.ki_filp = zone->file;
+ cmd->iocb.ki_complete = zloop_rw_complete;
+ if (!zlo->buffered_io)
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+
+ if (rw == ITER_SOURCE)
+ ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
+ else
+ ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
+unlock:
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+ mutex_unlock(&zone->lock);
+out:
+ if (ret != -EIOCBQUEUED)
+ zloop_rw_complete(&cmd->iocb, ret);
+ zloop_put_cmd(cmd);
+}
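
zloop_rw() follows the standard kernel async I/O convention for kiocb-based file access: a return of -EIOCBQUEUED means the I/O will complete later through ->ki_complete(), while any other return value means it completed synchronously and the handler must be called by hand. Reduced to a sketch, with names from the function above:

	cmd->iocb.ki_filp = zone->file;
	cmd->iocb.ki_complete = zloop_rw_complete;	/* async completion path */
	ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
	if (ret != -EIOCBQUEUED)
		zloop_rw_complete(&cmd->iocb, ret);	/* completed synchronously */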
+
+static void zloop_handle_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ /* We can block in this context, so ignore REQ_NOWAIT. */
+ if (rq->cmd_flags & REQ_NOWAIT)
+ rq->cmd_flags &= ~REQ_NOWAIT;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ /*
+ * zloop_rw() always executes asynchronously or completes
+ * directly.
+ */
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+ /*
+ * Sync the entire FS containing the zone files instead of
+ * walking all files
+ */
+ cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ cmd->ret = zloop_reset_all_zones(zlo);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_OPEN:
+ cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ pr_err("Unsupported operation %d\n", req_op(rq));
+ cmd->ret = -EOPNOTSUPP;
+ break;
+ }
+
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_cmd_workfn(struct work_struct *work)
+{
+ struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
+ int orig_flags = current->flags;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ zloop_handle_cmd(cmd);
+ current->flags = orig_flags;
+}
+
+static void zloop_complete_rq(struct request *rq)
+{
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = cmd->sector >> zlo->zone_shift;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ blk_status_t sts = BLK_STS_OK;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed read sector %llu, %llu sectors\n",
+ zone_no, cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ /* short read */
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ }
+ break;
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n",
+ zone_no,
+ req_op(rq) == REQ_OP_WRITE ? "" : "append ",
+ cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ pr_err("Zone %u: partial write %ld/%u B\n",
+ zone_no, cmd->ret, blk_rq_bytes(rq));
+ cmd->ret = -EIO;
+ }
+
+ if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ /*
+ * A write to a sequential zone file failed: mark the
+ * zone as having an error. This will be corrected and
+ * cleared when the next IO is submitted.
+ */
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ break;
+ }
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ rq->__sector = cmd->sector;
+
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->ret < 0)
+ sts = errno_to_blk_status(cmd->ret);
+ blk_mq_end_request(rq, sts);
+}
+
+static bool zloop_set_zone_append_sector(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ sector_t zone_end = zone->start + zlo->zone_capacity;
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+
+ if (zone->cond == BLK_ZONE_COND_FULL ||
+ zone->wp + nr_sectors > zone_end) {
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ return false;
+ }
+
+ rq->__sector = zone->wp;
+ zone->wp += blk_rq_sectors(rq);
+ if (zone->wp >= zone_end) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ }
+
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+
+ return true;
+}
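
This helper is what makes ordered_zone_append work: each append is assigned its write-pointer range under wp_lock at dispatch time, so completion order can never change data placement. Illustrated for two back-to-back appends to one zone:

	/* A: rq_A->__sector = wp; wp += nr_A;
	 * B: rq_B->__sector = wp; wp += nr_B;
	 * A and B may complete in either order; their on-disk ranges were
	 * fixed at dispatch and cannot overlap.
	 */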
+
+static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ if (zlo->state == Zlo_deleting)
+ return BLK_STS_IOERR;
+
+ /*
+ * If we need to strongly order zone append operations, set the request
+ * sector to the zone write pointer location now instead of when the
+ * command work runs.
+ */
+ if (zlo->ordered_zone_append && req_op(rq) == REQ_OP_ZONE_APPEND) {
+ if (!zloop_set_zone_append_sector(rq))
+ return BLK_STS_IOERR;
+ }
+
+ blk_mq_start_request(rq);
+
+ INIT_WORK(&cmd->work, zloop_cmd_workfn);
+ queue_work(zlo->workqueue, &cmd->work);
+
+ return BLK_STS_OK;
+}
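
The request path is therefore fully asynchronous: ->queue_rq() never blocks, all file I/O runs in process context on the per-device workqueue, and completion funnels back through the ->complete() hook. A minimal sketch of the worker side; do_io() is a hypothetical stand-in for the real command handling:

static void example_cmd_workfn(struct work_struct *work)
{
	struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	cmd->ret = do_io(cmd);		/* hypothetical blocking file I/O */
	blk_mq_complete_request(rq);	/* invokes zloop_complete_rq() */
}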
+
+static const struct blk_mq_ops zloop_mq_ops = {
+ .queue_rq = zloop_queue_rq,
+ .complete = zloop_complete_rq,
+};
+
+static int zloop_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct zloop_device *zlo = disk->private_data;
+ int ret;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ if (zlo->state != Zlo_live)
+ ret = -ENXIO;
+ mutex_unlock(&zloop_ctl_mutex);
+ return ret;
+}
+
+static int zloop_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, struct blk_report_zones_args *args)
+{
+ struct zloop_device *zlo = disk->private_data;
+ struct blk_zone blkz = {};
+ unsigned int first, i;
+ unsigned long flags;
+ int ret;
+
+ first = disk_zone_no(disk, sector);
+ if (first >= zlo->nr_zones)
+ return 0;
+ nr_zones = min(nr_zones, zlo->nr_zones - first);
+
+ for (i = 0; i < nr_zones; i++) {
+ unsigned int zone_no = first + i;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret) {
+ mutex_unlock(&zone->lock);
+ return ret;
+ }
+ }
+
+ blkz.start = zone->start;
+ blkz.len = zlo->zone_size;
+ spin_lock_irqsave(&zone->wp_lock, flags);
+ blkz.wp = zone->wp;
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ blkz.cond = zone->cond;
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ blkz.capacity = zlo->zone_size;
+ } else {
+ blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ blkz.capacity = zlo->zone_capacity;
+ }
+
+ mutex_unlock(&zone->lock);
+
+ ret = disk_report_zone(disk, &blkz, i, args);
+ if (ret)
+ return ret;
+ }
+
+ return nr_zones;
+}
+
+static void zloop_free_disk(struct gendisk *disk)
+{
+ struct zloop_device *zlo = disk->private_data;
+ unsigned int i;
+
+ blk_mq_free_tag_set(&zlo->tag_set);
+
+ for (i = 0; i < zlo->nr_zones; i++) {
+ struct zloop_zone *zone = &zlo->zones[i];
+
+ mapping_set_gfp_mask(zone->file->f_mapping,
+ zone->old_gfp_mask);
+ fput(zone->file);
+ }
+
+ fput(zlo->data_dir);
+ destroy_workqueue(zlo->workqueue);
+ kfree(zlo->base_dir);
+ kvfree(zlo);
+}
+
+static const struct block_device_operations zloop_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_open,
+ .report_zones = zloop_report_zones,
+ .free_disk = zloop_free_disk,
+};
+
+__printf(3, 4)
+static struct file *zloop_filp_open_fmt(int oflags, umode_t mode,
+ const char *fmt, ...)
+{
+ struct file *file;
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ file = filp_open(p, oflags, mode);
+ kfree(p);
+ return file;
+}
+
+static int zloop_get_block_size(struct zloop_device *zlo,
+ struct zloop_zone *zone)
+{
+ struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * If the FS block size is lower than or equal to 4K, use that as the
+ * device block size. Otherwise, fallback to the FS direct IO alignment
+ * constraint if that is provided, and to the FS underlying device
+ * physical block size if the direct IO alignment is unknown.
+ */
+ if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K)
+ zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize;
+ else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ zlo->block_size = st.dio_offset_align;
+ else if (sb_bdev)
+ zlo->block_size = bdev_physical_block_size(sb_bdev);
+ else
+ zlo->block_size = SECTOR_SIZE;
+
+ if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone capacity is not aligned to block size %u\n",
+ zlo->block_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
+ unsigned int zone_no, bool restore)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int oflags = O_RDWR;
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ mutex_init(&zone->lock);
+ spin_lock_init(&zone->wp_lock);
+ zone->start = (sector_t)zone_no << zlo->zone_shift;
+
+ if (!restore)
+ oflags |= O_CREAT;
+
+ if (!opts->buffered_io)
+ oflags |= O_DIRECT;
+
+ if (zone_no < zlo->nr_conv_zones) {
+ /* Conventional zone file. */
+ set_bit(ZLOOP_ZONE_CONV, &zone->flags);
+ zone->cond = BLK_ZONE_COND_NOT_WP;
+ zone->wp = U64_MAX;
+
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat\n", zone_no);
+ return ret;
+ }
+ file_sectors = stat.size >> SECTOR_SHIFT;
+
+ if (restore && file_sectors != zlo->zone_size) {
+ pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n",
+ zone_no, file_sectors, zlo->zone_size);
+ return -EINVAL;
+ }
+
+ ret = vfs_truncate(&zone->file->f_path,
+ zlo->zone_size << SECTOR_SHIFT);
+ if (ret < 0) {
+ pr_err("Failed to truncate zone %u file (err=%d)\n",
+ zone_no, ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Sequential zone file. */
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static bool zloop_dev_exists(struct zloop_device *zlo)
+{
+ struct file *cnv, *seq;
+ bool exists;
+
+ cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, 0);
+ seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, 0);
+ exists = !IS_ERR(cnv) || !IS_ERR(seq);
+
+ if (!IS_ERR(cnv))
+ fput(cnv);
+ if (!IS_ERR(seq))
+ fput(seq);
+
+ return exists;
+}
+
+static int zloop_ctl_add(struct zloop_options *opts)
+{
+ struct queue_limits lim = {
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+ .features = BLK_FEAT_ZONED,
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+ int ret = -EINVAL;
+ bool restore;
+
+ __module_get(THIS_MODULE);
+
+ nr_zones = opts->capacity >> ilog2(opts->zone_size);
+ if (opts->nr_conv_zones >= nr_zones) {
+ pr_err("Invalid number of conventional zones %u\n",
+ opts->nr_conv_zones);
+ goto out;
+ }
+
+ zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL);
+ if (!zlo) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zlo->state = Zlo_creating;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ goto out_free_dev;
+
+ /* Allocate id, if @opts->id >= 0, we're requesting that specific id */
+ if (opts->id >= 0) {
+ ret = idr_alloc(&zloop_index_idr, zlo,
+ opts->id, opts->id + 1, GFP_KERNEL);
+ if (ret == -ENOSPC)
+ ret = -EEXIST;
+ } else {
+ ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL);
+ }
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret < 0)
+ goto out_free_dev;
+
+ zlo->id = ret;
+ zlo->zone_shift = ilog2(opts->zone_size);
+ zlo->zone_size = opts->zone_size;
+ if (opts->zone_capacity)
+ zlo->zone_capacity = opts->zone_capacity;
+ else
+ zlo->zone_capacity = zlo->zone_size;
+ zlo->nr_zones = nr_zones;
+ zlo->nr_conv_zones = opts->nr_conv_zones;
+ zlo->buffered_io = opts->buffered_io;
+ zlo->zone_append = opts->zone_append;
+ if (zlo->zone_append)
+ zlo->ordered_zone_append = opts->ordered_zone_append;
+
+ zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
+ opts->nr_queues * opts->queue_depth, zlo->id);
+ if (!zlo->workqueue) {
+ ret = -ENOMEM;
+ goto out_free_idr;
+ }
+
+ if (opts->base_dir)
+ zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL);
+ else
+ zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL);
+ if (!zlo->base_dir) {
+ ret = -ENOMEM;
+ goto out_destroy_workqueue;
+ }
+
+ zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u",
+ zlo->base_dir, zlo->id);
+ if (IS_ERR(zlo->data_dir)) {
+ ret = PTR_ERR(zlo->data_dir);
+ pr_warn("Failed to open directory %s/%u (err=%d)\n",
+ zlo->base_dir, zlo->id, ret);
+ goto out_free_base_dir;
+ }
+
+ /*
+ * If we already have zone files, we are restoring a device created by a
+ * previous add operation. In this case, zloop_init_zone() will check
+ * that the zone files are consistent with the zone configuration given.
+ */
+ restore = zloop_dev_exists(zlo);
+ for (i = 0; i < nr_zones; i++) {
+ ret = zloop_init_zone(zlo, opts, i, restore);
+ if (ret)
+ goto out_close_files;
+ }
+
+ lim.physical_block_size = zlo->block_size;
+ lim.logical_block_size = zlo->block_size;
+ if (zlo->zone_append)
+ lim.max_hw_zone_append_sectors = lim.max_hw_sectors;
+
+ zlo->tag_set.ops = &zloop_mq_ops;
+ zlo->tag_set.nr_hw_queues = opts->nr_queues;
+ zlo->tag_set.queue_depth = opts->queue_depth;
+ zlo->tag_set.numa_node = NUMA_NO_NODE;
+ zlo->tag_set.cmd_size = sizeof(struct zloop_cmd);
+ zlo->tag_set.driver_data = zlo;
+
+ ret = blk_mq_alloc_tag_set(&zlo->tag_set);
+ if (ret) {
+ pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret);
+ goto out_close_files;
+ }
+
+ zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo);
+ if (IS_ERR(zlo->disk)) {
+ pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret);
+ ret = PTR_ERR(zlo->disk);
+ goto out_cleanup_tags;
+ }
+ zlo->disk->flags = GENHD_FL_NO_PART;
+ zlo->disk->fops = &zloop_fops;
+ zlo->disk->private_data = zlo;
+ sprintf(zlo->disk->disk_name, "zloop%d", zlo->id);
+ set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones);
+
+ ret = blk_revalidate_disk_zones(zlo->disk);
+ if (ret)
+ goto out_cleanup_disk;
+
+ ret = add_disk(zlo->disk);
+ if (ret) {
+ pr_err("add_disk failed (err=%d)\n", ret);
+ goto out_cleanup_disk;
+ }
+
+ mutex_lock(&zloop_ctl_mutex);
+ zlo->state = Zlo_live;
+ mutex_unlock(&zloop_ctl_mutex);
+
+ pr_info("zloop: device %d, %u zones of %llu MiB, %u B block size\n",
+ zlo->id, zlo->nr_zones,
+ ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
+ zlo->block_size);
+ pr_info("zloop%d: using %s%s zone append\n",
+ zlo->id,
+ zlo->ordered_zone_append ? "ordered " : "",
+ zlo->zone_append ? "native" : "emulated");
+
+ return 0;
+
+out_cleanup_disk:
+ put_disk(zlo->disk);
+out_cleanup_tags:
+ blk_mq_free_tag_set(&zlo->tag_set);
+out_close_files:
+ for (j = 0; j < i; j++) {
+ struct zloop_zone *zone = &zlo->zones[j];
+
+ if (!IS_ERR_OR_NULL(zone->file))
+ fput(zone->file);
+ }
+ fput(zlo->data_dir);
+out_free_base_dir:
+ kfree(zlo->base_dir);
+out_destroy_workqueue:
+ destroy_workqueue(zlo->workqueue);
+out_free_idr:
+ mutex_lock(&zloop_ctl_mutex);
+ idr_remove(&zloop_index_idr, zlo->id);
+ mutex_unlock(&zloop_ctl_mutex);
+out_free_dev:
+ kvfree(zlo);
+out:
+ module_put(THIS_MODULE);
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ret;
+}
+
+static int zloop_ctl_remove(struct zloop_options *opts)
+{
+ struct zloop_device *zlo;
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+ pr_err("No ID specified\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ zlo = idr_find(&zloop_index_idr, opts->id);
+ if (!zlo || zlo->state == Zlo_creating) {
+ ret = -ENODEV;
+ } else if (zlo->state == Zlo_deleting) {
+ ret = -EINVAL;
+ } else {
+ idr_remove(&zloop_index_idr, zlo->id);
+ zlo->state = Zlo_deleting;
+ }
+
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ del_gendisk(zlo->disk);
+ put_disk(zlo->disk);
+
+ pr_info("Removed device %d\n", opts->id);
+
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int zloop_parse_options(struct zloop_options *opts, const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ unsigned int token;
+ int ret = 0;
+
+ /* Set defaults. */
+ opts->mask = 0;
+ opts->id = ZLOOP_DEF_ID;
+ opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES;
+ opts->zone_size = ZLOOP_DEF_ZONE_SIZE;
+ opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES;
+ opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
+ opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
+ opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+ opts->zone_append = ZLOOP_DEF_ZONE_APPEND;
+ opts->ordered_zone_append = ZLOOP_DEF_ORDERED_ZONE_APPEND;
+
+ if (!buf)
+ return 0;
+
+ /* Skip leading spaces before the options. */
+ while (isspace(*buf))
+ buf++;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ /* Parse the options, doing only some light invalid value checks. */
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, zloop_opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case ZLOOP_OPT_ID:
+ if (match_int(args, &opts->id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case ZLOOP_OPT_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_SIZE:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB ||
+ !is_power_of_2(token)) {
+ pr_err("Invalid zone size %u\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_size =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid zone capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_NR_CONV_ZONES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_conv_zones = token;
+ break;
+ case ZLOOP_OPT_BASE_DIR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->base_dir);
+ opts->base_dir = p;
+ break;
+ case ZLOOP_OPT_NR_QUEUES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid number of queues\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_queues = min(token, num_online_cpus());
+ break;
+ case ZLOOP_OPT_QUEUE_DEPTH:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid queue depth\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_depth = token;
+ break;
+ case ZLOOP_OPT_BUFFERED_IO:
+ opts->buffered_io = true;
+ break;
+ case ZLOOP_OPT_ZONE_APPEND:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token != 0 && token != 1) {
+ pr_err("Invalid zone_append value\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_append = token;
+ break;
+ case ZLOOP_OPT_ORDERED_ZONE_APPEND:
+ opts->ordered_zone_append = true;
+ break;
+ case ZLOOP_OPT_ERR:
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (opts->capacity <= opts->zone_size) {
+ pr_err("Invalid capacity\n");
+ goto out;
+ }
+
+ if (opts->zone_capacity > opts->zone_size) {
+ pr_err("Invalid zone capacity\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(options);
+ return ret;
+}
+
+enum {
+ ZLOOP_CTL_ADD,
+ ZLOOP_CTL_REMOVE,
+};
+
+static struct zloop_ctl_op {
+ int code;
+ const char *name;
+} zloop_ctl_ops[] = {
+ { ZLOOP_CTL_ADD, "add" },
+ { ZLOOP_CTL_REMOVE, "remove" },
+ { -1, NULL },
+};
+
+static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct zloop_options opts = { };
+ struct zloop_ctl_op *op;
+ const char *buf, *opts_buf;
+ int i, ret;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) {
+ op = &zloop_ctl_ops[i];
+ if (!op->name) {
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strncmp(buf, op->name, strlen(op->name)))
+ break;
+ }
+
+ if (count <= strlen(op->name))
+ opts_buf = NULL;
+ else
+ opts_buf = buf + strlen(op->name);
+
+ ret = zloop_parse_options(&opts, opts_buf);
+ if (ret) {
+ pr_err("Failed to parse options\n");
+ goto out;
+ }
+
+ switch (op->code) {
+ case ZLOOP_CTL_ADD:
+ ret = zloop_ctl_add(&opts);
+ break;
+ case ZLOOP_CTL_REMOVE:
+ ret = zloop_ctl_remove(&opts);
+ break;
+ default:
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(opts.base_dir);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int zloop_ctl_show(struct seq_file *seq_file, void *private)
+{
+ const struct match_token *tok;
+ int i;
+
+ /* Add operation */
+ seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name);
+ for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) {
+ tok = &zloop_opt_tokens[i];
+ if (!tok->pattern)
+ break;
+ if (i)
+ seq_putc(seq_file, ',');
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_putc(seq_file, '\n');
+
+ /* Remove operation */
+ seq_puts(seq_file, zloop_ctl_ops[1].name);
+ seq_puts(seq_file, " id=%d\n");
+
+ return 0;
+}
+
+static int zloop_ctl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return single_open(file, zloop_ctl_show, NULL);
+}
+
+static int zloop_ctl_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations zloop_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_ctl_open,
+ .release = zloop_ctl_release,
+ .write = zloop_ctl_write,
+ .read = seq_read,
+};
+
+static struct miscdevice zloop_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zloop-control",
+ .fops = &zloop_ctl_fops,
+};
+
+static int __init zloop_init(void)
+{
+ int ret;
+
+ ret = misc_register(&zloop_misc);
+ if (ret) {
+ pr_err("Failed to register misc device: %d\n", ret);
+ return ret;
+ }
+ pr_info("Module loaded\n");
+
+ return 0;
+}
+
+static void __exit zloop_exit(void)
+{
+ misc_deregister(&zloop_misc);
+ idr_destroy(&zloop_index_idr);
+}
+
+module_init(zloop_init);
+module_exit(zloop_exit);
+
+MODULE_DESCRIPTION("Zoned loopback device");
+MODULE_LICENSE("GPL");
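For illustration, the control interface above can be driven from userspace by writing an operation string to the misc device. A minimal sketch follows; the "add" operation name and the /dev/zloop-control node come from zloop_ctl_ops and zloop_misc above, but the exact option spellings live in the zloop_opt_tokens table earlier in this patch, so the option string here is an assumption.

	/* Hedged userspace sketch: the option names are illustrative
	 * assumptions matching the ZLOOP_OPT_* handlers above, not
	 * verified token patterns.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *cmd = "add id=0,base_dir=/var/local/zloop,nr_queues=2";
		int fd = open("/dev/zloop-control", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/zloop-control");
			return 1;
		}
		/* zloop_ctl_write() matches the op name, then parses options */
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("write");
		close(fd);
		return 0;
	}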
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 7b29cce60ab2..402b7b175863 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -2,7 +2,6 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && MMU
- depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842
select ZSMALLOC
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
@@ -15,6 +14,49 @@ config ZRAM
See Documentation/admin-guide/blockdev/zram.rst for more information.
+config ZRAM_BACKEND_LZ4
+ bool "lz4 compression support"
+ depends on ZRAM
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+
+config ZRAM_BACKEND_LZ4HC
+ bool "lz4hc compression support"
+ depends on ZRAM
+ select LZ4HC_COMPRESS
+ select LZ4_DECOMPRESS
+
+config ZRAM_BACKEND_ZSTD
+ bool "zstd compression support"
+ depends on ZRAM
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
+
+config ZRAM_BACKEND_DEFLATE
+ bool "deflate compression support"
+ depends on ZRAM
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
+
+config ZRAM_BACKEND_842
+ bool "842 compression support"
+ depends on ZRAM
+ select 842_COMPRESS
+ select 842_DECOMPRESS
+
+config ZRAM_BACKEND_FORCE_LZO
+ depends on ZRAM
+ def_bool !ZRAM_BACKEND_LZ4 && !ZRAM_BACKEND_LZ4HC && \
+ !ZRAM_BACKEND_ZSTD && !ZRAM_BACKEND_DEFLATE && \
+ !ZRAM_BACKEND_842
+
+config ZRAM_BACKEND_LZO
+ bool "lzo and lzo-rle compression support" if !ZRAM_BACKEND_FORCE_LZO
+ depends on ZRAM
+ default ZRAM_BACKEND_FORCE_LZO
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+
choice
prompt "Default zram compressor"
default ZRAM_DEF_COMP_LZORLE
@@ -22,38 +64,45 @@ choice
config ZRAM_DEF_COMP_LZORLE
bool "lzo-rle"
- depends on CRYPTO_LZO
+ depends on ZRAM_BACKEND_LZO
-config ZRAM_DEF_COMP_ZSTD
- bool "zstd"
- depends on CRYPTO_ZSTD
+config ZRAM_DEF_COMP_LZO
+ bool "lzo"
+ depends on ZRAM_BACKEND_LZO
config ZRAM_DEF_COMP_LZ4
bool "lz4"
- depends on CRYPTO_LZ4
-
-config ZRAM_DEF_COMP_LZO
- bool "lzo"
- depends on CRYPTO_LZO
+ depends on ZRAM_BACKEND_LZ4
config ZRAM_DEF_COMP_LZ4HC
bool "lz4hc"
- depends on CRYPTO_LZ4HC
+ depends on ZRAM_BACKEND_LZ4HC
+
+config ZRAM_DEF_COMP_ZSTD
+ bool "zstd"
+ depends on ZRAM_BACKEND_ZSTD
+
+config ZRAM_DEF_COMP_DEFLATE
+ bool "deflate"
+ depends on ZRAM_BACKEND_DEFLATE
config ZRAM_DEF_COMP_842
bool "842"
- depends on CRYPTO_842
+ depends on ZRAM_BACKEND_842
endchoice
config ZRAM_DEF_COMP
string
+ depends on ZRAM
default "lzo-rle" if ZRAM_DEF_COMP_LZORLE
- default "zstd" if ZRAM_DEF_COMP_ZSTD
- default "lz4" if ZRAM_DEF_COMP_LZ4
default "lzo" if ZRAM_DEF_COMP_LZO
+ default "lz4" if ZRAM_DEF_COMP_LZ4
default "lz4hc" if ZRAM_DEF_COMP_LZ4HC
+ default "zstd" if ZRAM_DEF_COMP_ZSTD
+ default "deflate" if ZRAM_DEF_COMP_DEFLATE
default "842" if ZRAM_DEF_COMP_842
+ default "unset-value"
config ZRAM_WRITEBACK
bool "Write back incompressible or idle page to backing device"
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index de9e457907b1..0fdefd576691 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
+
zram-y := zcomp.o zram_drv.o
+zram-$(CONFIG_ZRAM_BACKEND_LZO) += backend_lzorle.o backend_lzo.o
+zram-$(CONFIG_ZRAM_BACKEND_LZ4) += backend_lz4.o
+zram-$(CONFIG_ZRAM_BACKEND_LZ4HC) += backend_lz4hc.o
+zram-$(CONFIG_ZRAM_BACKEND_ZSTD) += backend_zstd.o
+zram-$(CONFIG_ZRAM_BACKEND_DEFLATE) += backend_deflate.o
+zram-$(CONFIG_ZRAM_BACKEND_842) += backend_842.o
+
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/backend_842.c b/drivers/block/zram/backend_842.c
new file mode 100644
index 000000000000..10d9d5c60f53
--- /dev/null
+++ b/drivers/block/zram/backend_842.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sw842.h>
+#include <linux/vmalloc.h>
+
+#include "backend_842.h"
+
+static void release_params_842(struct zcomp_params *params)
+{
+}
+
+static int setup_params_842(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static void destroy_842(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int create_842(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static int compress_842(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ unsigned int dlen = req->dst_len;
+ int ret;
+
+ ret = sw842_compress(req->src, req->src_len, req->dst, &dlen,
+ ctx->context);
+ if (ret == 0)
+ req->dst_len = dlen;
+ return ret;
+}
+
+static int decompress_842(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ unsigned int dlen = req->dst_len;
+
+ return sw842_decompress(req->src, req->src_len, req->dst, &dlen);
+}
+
+const struct zcomp_ops backend_842 = {
+ .compress = compress_842,
+ .decompress = decompress_842,
+ .create_ctx = create_842,
+ .destroy_ctx = destroy_842,
+ .setup_params = setup_params_842,
+ .release_params = release_params_842,
+ .name = "842",
+};
diff --git a/drivers/block/zram/backend_842.h b/drivers/block/zram/backend_842.h
new file mode 100644
index 000000000000..4dc85c188799
--- /dev/null
+++ b/drivers/block/zram/backend_842.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_842_H__
+#define __BACKEND_842_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_842;
+
+#endif /* __BACKEND_842_H__ */
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
new file mode 100644
index 000000000000..b75016e0e654
--- /dev/null
+++ b/drivers/block/zram/backend_deflate.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+
+#include "backend_deflate.h"
+
+/* Use the same value as crypto API */
+#define DEFLATE_DEF_WINBITS (-11)
+#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
+
+struct deflate_ctx {
+ struct z_stream_s cctx;
+ struct z_stream_s dctx;
+};
+
+static void deflate_release_params(struct zcomp_params *params)
+{
+}
+
+static int deflate_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NOT_SET)
+ params->level = Z_DEFAULT_COMPRESSION;
+ if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET)
+ params->deflate.winbits = DEFLATE_DEF_WINBITS;
+
+ return 0;
+}
+
+static void deflate_destroy(struct zcomp_ctx *ctx)
+{
+ struct deflate_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ if (zctx->cctx.workspace) {
+ zlib_deflateEnd(&zctx->cctx);
+ vfree(zctx->cctx.workspace);
+ }
+ if (zctx->dctx.workspace) {
+ zlib_inflateEnd(&zctx->dctx);
+ vfree(zctx->dctx.workspace);
+ }
+ kfree(zctx);
+}
+
+static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct deflate_ctx *zctx;
+ size_t sz;
+ int ret;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL);
+ zctx->cctx.workspace = vzalloc(sz);
+ if (!zctx->cctx.workspace)
+ goto error;
+
+ ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
+ params->deflate.winbits, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ goto error;
+
+ sz = zlib_inflate_workspacesize();
+ zctx->dctx.workspace = vzalloc(sz);
+ if (!zctx->dctx.workspace)
+ goto error;
+
+ ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits);
+ if (ret != Z_OK)
+ goto error;
+
+ return 0;
+
+error:
+ deflate_destroy(ctx);
+ return -EINVAL;
+}
+
+static int deflate_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct deflate_ctx *zctx = ctx->context;
+ struct z_stream_s *deflate;
+ int ret;
+
+ deflate = &zctx->cctx;
+ ret = zlib_deflateReset(deflate);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ deflate->next_in = (u8 *)req->src;
+ deflate->avail_in = req->src_len;
+ deflate->next_out = (u8 *)req->dst;
+ deflate->avail_out = req->dst_len;
+
+ ret = zlib_deflate(deflate, Z_FINISH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dst_len = deflate->total_out;
+ return 0;
+}
+
+static int deflate_decompress(struct zcomp_params *params,
+ struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct deflate_ctx *zctx = ctx->context;
+ struct z_stream_s *inflate;
+ int ret;
+
+ inflate = &zctx->dctx;
+
+ ret = zlib_inflateReset(inflate);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ inflate->next_in = (u8 *)req->src;
+ inflate->avail_in = req->src_len;
+ inflate->next_out = (u8 *)req->dst;
+ inflate->avail_out = req->dst_len;
+
+ ret = zlib_inflate(inflate, Z_SYNC_FLUSH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+const struct zcomp_ops backend_deflate = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .create_ctx = deflate_create,
+ .destroy_ctx = deflate_destroy,
+ .setup_params = deflate_setup_params,
+ .release_params = deflate_release_params,
+ .name = "deflate",
+};
diff --git a/drivers/block/zram/backend_deflate.h b/drivers/block/zram/backend_deflate.h
new file mode 100644
index 000000000000..a39ac12b114c
--- /dev/null
+++ b/drivers/block/zram/backend_deflate.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_DEFLATE_H__
+#define __BACKEND_DEFLATE_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_deflate;
+
+#endif /* __BACKEND_DEFLATE_H__ */
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
new file mode 100644
index 000000000000..daccd60857eb
--- /dev/null
+++ b/drivers/block/zram/backend_lz4.c
@@ -0,0 +1,127 @@
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "backend_lz4.h"
+
+struct lz4_ctx {
+ void *mem;
+
+ LZ4_streamDecode_t *dstrm;
+ LZ4_stream_t *cstrm;
+};
+
+static void lz4_release_params(struct zcomp_params *params)
+{
+}
+
+static int lz4_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NOT_SET)
+ params->level = LZ4_ACCELERATION_DEFAULT;
+
+ return 0;
+}
+
+static void lz4_destroy(struct zcomp_ctx *ctx)
+{
+ struct lz4_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ vfree(zctx->mem);
+ kfree(zctx->dstrm);
+ kfree(zctx->cstrm);
+ kfree(zctx);
+}
+
+static int lz4_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct lz4_ctx *zctx;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ zctx->mem = vmalloc(LZ4_MEM_COMPRESS);
+ if (!zctx->mem)
+ goto error;
+ } else {
+ zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL);
+ if (!zctx->dstrm)
+ goto error;
+
+ zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL);
+ if (!zctx->cstrm)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ lz4_destroy(ctx);
+ return -ENOMEM;
+}
+
+static int lz4_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->cstrm) {
+ ret = LZ4_compress_fast(req->src, req->dst, req->src_len,
+ req->dst_len, params->level,
+ zctx->mem);
+ } else {
+ /* Cstrm needs to be reset */
+ ret = LZ4_loadDict(zctx->cstrm, params->dict, params->dict_sz);
+ if (ret != params->dict_sz)
+ return -EINVAL;
+ ret = LZ4_compress_fast_continue(zctx->cstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len, params->level);
+ }
+ if (!ret)
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int lz4_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->dstrm) {
+ ret = LZ4_decompress_safe(req->src, req->dst, req->src_len,
+ req->dst_len);
+ } else {
+ /* Dstrm needs to be reset */
+ ret = LZ4_setStreamDecode(zctx->dstrm, params->dict,
+ params->dict_sz);
+ if (!ret)
+ return -EINVAL;
+ ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len);
+ }
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_lz4 = {
+ .compress = lz4_compress,
+ .decompress = lz4_decompress,
+ .create_ctx = lz4_create,
+ .destroy_ctx = lz4_destroy,
+ .setup_params = lz4_setup_params,
+ .release_params = lz4_release_params,
+ .name = "lz4",
+};
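The two error checks in lz4_compress() and lz4_decompress() differ because the LZ4 return conventions differ; a summary of what the code above relies on:

	/* Return-value conventions handled above (from <linux/lz4.h>):
	 *   LZ4_compress_fast()   - returns 0 on failure, the compressed
	 *                           size on success
	 *   LZ4_decompress_safe() - returns a negative value on error, the
	 *                           decompressed size otherwise
	 */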
diff --git a/drivers/block/zram/backend_lz4.h b/drivers/block/zram/backend_lz4.h
new file mode 100644
index 000000000000..c11fa602a703
--- /dev/null
+++ b/drivers/block/zram/backend_lz4.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZ4_H__
+#define __BACKEND_LZ4_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lz4;
+
+#endif /* __BACKEND_LZ4_H__ */
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
new file mode 100644
index 000000000000..9e8a35dfa56d
--- /dev/null
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -0,0 +1,128 @@
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "backend_lz4hc.h"
+
+struct lz4hc_ctx {
+ void *mem;
+
+ LZ4_streamDecode_t *dstrm;
+ LZ4_streamHC_t *cstrm;
+};
+
+static void lz4hc_release_params(struct zcomp_params *params)
+{
+}
+
+static int lz4hc_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NOT_SET)
+ params->level = LZ4HC_DEFAULT_CLEVEL;
+
+ return 0;
+}
+
+static void lz4hc_destroy(struct zcomp_ctx *ctx)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ kfree(zctx->dstrm);
+ kfree(zctx->cstrm);
+ vfree(zctx->mem);
+ kfree(zctx);
+}
+
+static int lz4hc_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct lz4hc_ctx *zctx;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ zctx->mem = vmalloc(LZ4HC_MEM_COMPRESS);
+ if (!zctx->mem)
+ goto error;
+ } else {
+ zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL);
+ if (!zctx->dstrm)
+ goto error;
+
+ zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL);
+ if (!zctx->cstrm)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ lz4hc_destroy(ctx);
+ return -EINVAL;
+}
+
+static int lz4hc_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->cstrm) {
+ ret = LZ4_compress_HC(req->src, req->dst, req->src_len,
+ req->dst_len, params->level,
+ zctx->mem);
+ } else {
+ /* Cstrm needs to be reset */
+ LZ4_resetStreamHC(zctx->cstrm, params->level);
+ ret = LZ4_loadDictHC(zctx->cstrm, params->dict,
+ params->dict_sz);
+ if (ret != params->dict_sz)
+ return -EINVAL;
+ ret = LZ4_compress_HC_continue(zctx->cstrm, req->src, req->dst,
+ req->src_len, req->dst_len);
+ }
+ if (!ret)
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int lz4hc_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->dstrm) {
+ ret = LZ4_decompress_safe(req->src, req->dst, req->src_len,
+ req->dst_len);
+ } else {
+ /* Dstrm needs to be reset */
+ ret = LZ4_setStreamDecode(zctx->dstrm, params->dict,
+ params->dict_sz);
+ if (!ret)
+ return -EINVAL;
+ ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len);
+ }
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_lz4hc = {
+ .compress = lz4hc_compress,
+ .decompress = lz4hc_decompress,
+ .create_ctx = lz4hc_create,
+ .destroy_ctx = lz4hc_destroy,
+ .setup_params = lz4hc_setup_params,
+ .release_params = lz4hc_release_params,
+ .name = "lz4hc",
+};
diff --git a/drivers/block/zram/backend_lz4hc.h b/drivers/block/zram/backend_lz4hc.h
new file mode 100644
index 000000000000..6de03551ed4d
--- /dev/null
+++ b/drivers/block/zram/backend_lz4hc.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZ4HC_H__
+#define __BACKEND_LZ4HC_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lz4hc;
+
+#endif /* __BACKEND_LZ4HC_H__ */
diff --git a/drivers/block/zram/backend_lzo.c b/drivers/block/zram/backend_lzo.c
new file mode 100644
index 000000000000..4c906beaae6b
--- /dev/null
+++ b/drivers/block/zram/backend_lzo.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "backend_lzo.h"
+
+static void lzo_release_params(struct zcomp_params *params)
+{
+}
+
+static int lzo_setup_params(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static int lzo_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lzo_destroy(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int lzo_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_1_compress(req->src, req->src_len, req->dst,
+ &req->dst_len, ctx->context);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzo_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(req->src, req->src_len,
+ req->dst, &req->dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+const struct zcomp_ops backend_lzo = {
+ .compress = lzo_compress,
+ .decompress = lzo_decompress,
+ .create_ctx = lzo_create,
+ .destroy_ctx = lzo_destroy,
+ .setup_params = lzo_setup_params,
+ .release_params = lzo_release_params,
+ .name = "lzo",
+};
diff --git a/drivers/block/zram/backend_lzo.h b/drivers/block/zram/backend_lzo.h
new file mode 100644
index 000000000000..93d54749e63c
--- /dev/null
+++ b/drivers/block/zram/backend_lzo.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZO_H__
+#define __BACKEND_LZO_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lzo;
+
+#endif /* __BACKEND_LZO_H__ */
diff --git a/drivers/block/zram/backend_lzorle.c b/drivers/block/zram/backend_lzorle.c
new file mode 100644
index 000000000000..10640c96cbfc
--- /dev/null
+++ b/drivers/block/zram/backend_lzorle.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "backend_lzorle.h"
+
+static void lzorle_release_params(struct zcomp_params *params)
+{
+}
+
+static int lzorle_setup_params(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static int lzorle_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lzorle_destroy(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int lzorle_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzorle1x_1_compress(req->src, req->src_len, req->dst,
+ &req->dst_len, ctx->context);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzorle_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(req->src, req->src_len,
+ req->dst, &req->dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+const struct zcomp_ops backend_lzorle = {
+ .compress = lzorle_compress,
+ .decompress = lzorle_decompress,
+ .create_ctx = lzorle_create,
+ .destroy_ctx = lzorle_destroy,
+ .setup_params = lzorle_setup_params,
+ .release_params = lzorle_release_params,
+ .name = "lzo-rle",
+};
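As the code above shows, lzo-rle differs from lzo only on the compression side:

	/* lzo-rle changes only the compressor (lzorle1x_1_compress()); the
	 * output remains LZO1X-decodable, which is why this backend and
	 * backend_lzo share lzo1x_decompress_safe() for decompression.
	 */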
diff --git a/drivers/block/zram/backend_lzorle.h b/drivers/block/zram/backend_lzorle.h
new file mode 100644
index 000000000000..6ecb163b09f1
--- /dev/null
+++ b/drivers/block/zram/backend_lzorle.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZORLE_H__
+#define __BACKEND_LZORLE_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lzorle;
+
+#endif /* __BACKEND_LZORLE_H__ */
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
new file mode 100644
index 000000000000..81defb98ed09
--- /dev/null
+++ b/drivers/block/zram/backend_zstd.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/zstd.h>
+
+#include "backend_zstd.h"
+
+struct zstd_ctx {
+ zstd_cctx *cctx;
+ zstd_dctx *dctx;
+ void *cctx_mem;
+ void *dctx_mem;
+};
+
+struct zstd_params {
+ zstd_custom_mem custom_mem;
+ zstd_cdict *cdict;
+ zstd_ddict *ddict;
+ zstd_parameters cprm;
+};
+
+/*
+ * For C/D dictionaries we need to provide zstd with zstd_custom_mem,
+ * which zstd uses internally to allocate/free memory when needed.
+ */
+static void *zstd_custom_alloc(void *opaque, size_t size)
+{
+ return kvzalloc(size, GFP_NOIO | __GFP_NOWARN);
+}
+
+static void zstd_custom_free(void *opaque, void *address)
+{
+ kvfree(address);
+}
+
+static void zstd_release_params(struct zcomp_params *params)
+{
+ struct zstd_params *zp = params->drv_data;
+
+ params->drv_data = NULL;
+ if (!zp)
+ return;
+
+ zstd_free_cdict(zp->cdict);
+ zstd_free_ddict(zp->ddict);
+ kfree(zp);
+}
+
+static int zstd_setup_params(struct zcomp_params *params)
+{
+ zstd_compression_parameters prm;
+ struct zstd_params *zp;
+
+ zp = kzalloc(sizeof(*zp), GFP_KERNEL);
+ if (!zp)
+ return -ENOMEM;
+
+ params->drv_data = zp;
+ if (params->level == ZCOMP_PARAM_NOT_SET)
+ params->level = zstd_default_clevel();
+
+ zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
+
+ zp->custom_mem.customAlloc = zstd_custom_alloc;
+ zp->custom_mem.customFree = zstd_custom_free;
+
+ prm = zstd_get_cparams(params->level, PAGE_SIZE,
+ params->dict_sz);
+
+ zp->cdict = zstd_create_cdict_byreference(params->dict,
+ params->dict_sz,
+ prm,
+ zp->custom_mem);
+ if (!zp->cdict)
+ goto error;
+
+ zp->ddict = zstd_create_ddict_byreference(params->dict,
+ params->dict_sz,
+ zp->custom_mem);
+ if (!zp->ddict)
+ goto error;
+
+ return 0;
+
+error:
+ zstd_release_params(params);
+ return -EINVAL;
+}
+
+static void zstd_destroy(struct zcomp_ctx *ctx)
+{
+ struct zstd_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ /*
+	 * If ->cctx_mem and ->dctx_mem were allocated then we didn't use
+	 * a C/D dictionary, and ->cctx / ->dctx were "embedded" into
+	 * these buffers.
+	 *
+	 * Otherwise we need to explicitly release ->cctx / ->dctx.
+ */
+ if (zctx->cctx_mem)
+ vfree(zctx->cctx_mem);
+ else
+ zstd_free_cctx(zctx->cctx);
+
+ if (zctx->dctx_mem)
+ vfree(zctx->dctx_mem);
+ else
+ zstd_free_dctx(zctx->dctx);
+
+ kfree(zctx);
+}
+
+static int zstd_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct zstd_ctx *zctx;
+ zstd_parameters prm;
+ size_t sz;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ prm = zstd_get_params(params->level, PAGE_SIZE);
+ sz = zstd_cctx_workspace_bound(&prm.cParams);
+ zctx->cctx_mem = vzalloc(sz);
+ if (!zctx->cctx_mem)
+ goto error;
+
+ zctx->cctx = zstd_init_cctx(zctx->cctx_mem, sz);
+ if (!zctx->cctx)
+ goto error;
+
+ sz = zstd_dctx_workspace_bound();
+ zctx->dctx_mem = vzalloc(sz);
+ if (!zctx->dctx_mem)
+ goto error;
+
+ zctx->dctx = zstd_init_dctx(zctx->dctx_mem, sz);
+ if (!zctx->dctx)
+ goto error;
+ } else {
+ struct zstd_params *zp = params->drv_data;
+
+ zctx->cctx = zstd_create_cctx_advanced(zp->custom_mem);
+ if (!zctx->cctx)
+ goto error;
+
+ zctx->dctx = zstd_create_dctx_advanced(zp->custom_mem);
+ if (!zctx->dctx)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ zstd_release_params(params);
+ zstd_destroy(ctx);
+ return -EINVAL;
+}
+
+static int zstd_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct zstd_params *zp = params->drv_data;
+ struct zstd_ctx *zctx = ctx->context;
+ size_t ret;
+
+ if (params->dict_sz == 0)
+ ret = zstd_compress_cctx(zctx->cctx, req->dst, req->dst_len,
+ req->src, req->src_len, &zp->cprm);
+ else
+ ret = zstd_compress_using_cdict(zctx->cctx, req->dst,
+ req->dst_len, req->src,
+ req->src_len,
+ zp->cdict);
+ if (zstd_is_error(ret))
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int zstd_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct zstd_params *zp = params->drv_data;
+ struct zstd_ctx *zctx = ctx->context;
+ size_t ret;
+
+ if (params->dict_sz == 0)
+ ret = zstd_decompress_dctx(zctx->dctx, req->dst, req->dst_len,
+ req->src, req->src_len);
+ else
+ ret = zstd_decompress_using_ddict(zctx->dctx, req->dst,
+ req->dst_len, req->src,
+ req->src_len, zp->ddict);
+ if (zstd_is_error(ret))
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_zstd = {
+ .compress = zstd_compress,
+ .decompress = zstd_decompress,
+ .create_ctx = zstd_create,
+ .destroy_ctx = zstd_destroy,
+ .setup_params = zstd_setup_params,
+ .release_params = zstd_release_params,
+ .name = "zstd",
+};
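Dictionary support is carried entirely by zcomp_params: zstd_setup_params() pre-builds the C/D dictionaries once, and every per-CPU context created by zstd_create() then shares them. A hedged sketch of attaching a pre-trained dictionary (the dictionary buffer and the caller are hypothetical; the fields and ZCOMP_PARAM_NOT_SET come from zcomp.h below):

	/* Hypothetical caller: wire a pre-trained dictionary in before
	 * zcomp_create(). Only the zcomp_params fields are taken from this
	 * patch; the dict buffer's origin is assumed.
	 */
	static struct zcomp *example_create_zstd_with_dict(void *dict,
							   size_t dict_sz)
	{
		static struct zcomp_params params;

		params.dict = dict;			/* referenced, not copied */
		params.dict_sz = dict_sz;
		params.level = ZCOMP_PARAM_NOT_SET;	/* zstd_default_clevel() */

		return zcomp_create("zstd", &params);
	}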
diff --git a/drivers/block/zram/backend_zstd.h b/drivers/block/zram/backend_zstd.h
new file mode 100644
index 000000000000..10fdfff1ec1c
--- /dev/null
+++ b/drivers/block/zram/backend_zstd.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_ZSTD_H__
+#define __BACKEND_ZSTD_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_zstd;
+
+#endif /* __BACKEND_ZSTD_H__ */
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 8237b08c49d8..b1bd1daa0060 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -1,7 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- */
#include <linux/kernel.h>
#include <linux/string.h>
@@ -9,156 +6,173 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <linux/cpu.h>
-#include <linux/crypto.h>
+#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
+#include <linux/sysfs.h>
#include "zcomp.h"
-static const char * const backends[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_LZO)
- "lzo",
- "lzo-rle",
+#include "backend_lzo.h"
+#include "backend_lzorle.h"
+#include "backend_lz4.h"
+#include "backend_lz4hc.h"
+#include "backend_zstd.h"
+#include "backend_deflate.h"
+#include "backend_842.h"
+
+static const struct zcomp_ops *backends[] = {
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
+ &backend_lzorle,
+ &backend_lzo,
+#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
+ &backend_lz4,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
- "lz4",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
+ &backend_lz4hc,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
- "lz4hc",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
+ &backend_zstd,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_842)
- "842",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
+ &backend_deflate,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
- "zstd",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
+ &backend_842,
#endif
+ NULL
};
-static void zcomp_strm_free(struct zcomp_strm *zstrm)
+static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- if (!IS_ERR_OR_NULL(zstrm->tfm))
- crypto_free_comp(zstrm->tfm);
+ comp->ops->destroy_ctx(&zstrm->ctx);
+ vfree(zstrm->local_copy);
vfree(zstrm->buffer);
- zstrm->tfm = NULL;
zstrm->buffer = NULL;
}
-/*
- * Initialize zcomp_strm structure with ->tfm initialized by backend, and
- * ->buffer. Return a negative value on error.
- */
-static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
+ int ret;
+
+ ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
+ if (ret)
+ return ret;
+
+ zstrm->local_copy = vzalloc(PAGE_SIZE);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
- if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
- zcomp_strm_free(zstrm);
+ if (!zstrm->buffer || !zstrm->local_copy) {
+ zcomp_strm_free(comp, zstrm);
return -ENOMEM;
}
return 0;
}
+static const struct zcomp_ops *lookup_backend_ops(const char *comp)
+{
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]->name))
+ break;
+ i++;
+ }
+ return backends[i];
+}
+
bool zcomp_available_algorithm(const char *comp)
{
- /*
- * Crypto does not ignore a trailing new line symbol,
- * so make sure you don't supply a string containing
- * one.
- * This also means that we permit zcomp initialisation
- * with any compressing algorithm known to crypto api.
- */
- return crypto_has_comp(comp, 0, 0) == 1;
+ return lookup_backend_ops(comp) != NULL;
}
/* show available compressors */
-ssize_t zcomp_available_show(const char *comp, char *buf)
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
{
- bool known_algorithm = false;
- ssize_t sz = 0;
int i;
- for (i = 0; i < ARRAY_SIZE(backends); i++) {
- if (!strcmp(comp, backends[i])) {
- known_algorithm = true;
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]);
+ for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
+ if (!strcmp(comp, backends[i]->name)) {
+ at += sysfs_emit_at(buf, at, "[%s] ",
+ backends[i]->name);
} else {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]);
+ at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
}
}
- /*
- * Out-of-tree module known to crypto api or a missing
- * entry in `backends'.
- */
- if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", comp);
-
- sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
- return sz;
+ at += sysfs_emit_at(buf, at, "\n");
+ return at;
}
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- local_lock(&comp->stream->lock);
- return this_cpu_ptr(comp->stream);
+ for (;;) {
+ struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);
+
+ /*
+ * Inspired by zswap
+ *
+ * stream is returned with ->mutex locked which prevents
+ * cpu_dead() from releasing this stream under us, however
+ * there is still a race window between raw_cpu_ptr() and
+ * mutex_lock(), during which we could have been migrated
+ * from a CPU that has already destroyed its stream. If
+ * so then unlock and re-try on the current CPU.
+ */
+ mutex_lock(&zstrm->lock);
+ if (likely(zstrm->buffer))
+ return zstrm;
+ mutex_unlock(&zstrm->lock);
+ }
}
-void zcomp_stream_put(struct zcomp *comp)
+void zcomp_stream_put(struct zcomp_strm *zstrm)
{
- local_unlock(&comp->stream->lock);
+ mutex_unlock(&zstrm->lock);
}
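zcomp_stream_get() returns with the stream mutex held, so every request must be bracketed by a get/put pair. A sketch of the expected calling pattern (the wrapper itself is hypothetical):

	/* Hypothetical wrapper illustrating the locking contract above. */
	static int example_compress_page(struct zcomp *comp, const void *src,
					 unsigned int *dst_len)
	{
		struct zcomp_strm *zstrm = zcomp_stream_get(comp); /* ->lock held */
		int ret;

		ret = zcomp_compress(comp, zstrm, src, dst_len);
		/* compressed data lives in zstrm->buffer until the stream is put */
		zcomp_stream_put(zstrm);			   /* drops ->lock */
		return ret;
	}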
-int zcomp_compress(struct zcomp_strm *zstrm,
- const void *src, unsigned int *dst_len)
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len)
{
- /*
- * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
- * because sometimes we can endup having a bigger compressed data
- * due to various reasons: for example compression algorithms tend
- * to add some padding to the compressed buffer. Speaking of padding,
- * comp algorithm `842' pads the compressed length to multiple of 8
- * and returns -ENOSP when the dst memory is not big enough, which
- * is not something that ZRAM wants to see. We can handle the
- * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
- * receive -ERRNO from the compressing backend we can't help it
- * anymore. To make `842' happy we need to tell the exact size of
- * the dst buffer, zram_drv will take care of the fact that
- * compressed buffer is too big.
- */
- *dst_len = PAGE_SIZE * 2;
+ struct zcomp_req req = {
+ .src = src,
+ .dst = zstrm->buffer,
+ .src_len = PAGE_SIZE,
+ .dst_len = 2 * PAGE_SIZE,
+ };
+ int ret;
- return crypto_comp_compress(zstrm->tfm,
- src, PAGE_SIZE,
- zstrm->buffer, dst_len);
+ might_sleep();
+ ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
+ if (!ret)
+ *dst_len = req.dst_len;
+ return ret;
}
-int zcomp_decompress(struct zcomp_strm *zstrm,
- const void *src, unsigned int src_len, void *dst)
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst)
{
- unsigned int dst_len = PAGE_SIZE;
+ struct zcomp_req req = {
+ .src = src,
+ .dst = dst,
+ .src_len = src_len,
+ .dst_len = PAGE_SIZE,
+ };
- return crypto_comp_decompress(zstrm->tfm,
- src, src_len,
- dst, &dst_len);
+ might_sleep();
+ return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
int ret;
- zstrm = per_cpu_ptr(comp->stream, cpu);
- local_lock_init(&zstrm->lock);
-
- ret = zcomp_strm_init(zstrm, comp);
+ ret = zcomp_strm_init(comp, zstrm);
if (ret)
pr_err("Can't allocate a compression stream\n");
return ret;
@@ -167,27 +181,38 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
- zstrm = per_cpu_ptr(comp->stream, cpu);
- zcomp_strm_free(zstrm);
+ mutex_lock(&zstrm->lock);
+ zcomp_strm_free(comp, zstrm);
+ mutex_unlock(&zstrm->lock);
return 0;
}
-static int zcomp_init(struct zcomp *comp)
+static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
- int ret;
+ int ret, cpu;
comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
return -ENOMEM;
+ comp->params = params;
+ ret = comp->ops->setup_params(comp->params);
+ if (ret)
+ goto cleanup;
+
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);
+
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
+
return 0;
cleanup:
+ comp->ops->release_params(comp->params);
free_percpu(comp->stream);
return ret;
}
@@ -195,37 +220,35 @@ cleanup:
void zcomp_destroy(struct zcomp *comp)
{
cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ comp->ops->release_params(comp->params);
free_percpu(comp->stream);
kfree(comp);
}
-/*
- * search available compressors for requested algorithm.
- * allocate new zcomp and initialize it. return compressing
- * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
- * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error, or any other error potentially
- * returned by zcomp_init().
- */
-struct zcomp *zcomp_create(const char *alg)
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
struct zcomp *comp;
int error;
/*
- * Crypto API will execute /sbin/modprobe if the compression module
- * is not loaded yet. We must do it here, otherwise we are about to
- * call /sbin/modprobe under CPU hot-plug lock.
+	 * The backends array has a sentinel NULL value, so its minimum
+	 * size is 1. For the array to be valid it must contain, apart
+	 * from the sentinel NULL element, at least one compression
+	 * backend.
*/
- if (!zcomp_available_algorithm(alg))
- return ERR_PTR(-EINVAL);
+ BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
if (!comp)
return ERR_PTR(-ENOMEM);
- comp->name = alg;
- error = zcomp_init(comp);
+ comp->ops = lookup_backend_ops(alg);
+ if (!comp->ops) {
+ kfree(comp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ error = zcomp_init(comp, params);
if (error) {
kfree(comp);
return ERR_PTR(error);
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index e9fe63da0e9b..eacfd3f7d61d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -1,42 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- */
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
-#include <linux/local_lock.h>
+
+#include <linux/mutex.h>
+
+#define ZCOMP_PARAM_NOT_SET INT_MIN
+
+struct deflate_params {
+ s32 winbits;
+};
+
+/*
+ * Immutable driver (backend) parameters. The driver may attach private
+ * data to it (e.g. driver representation of the dictionary, etc.).
+ *
+ * This data is kept per-comp and is shared among execution contexts.
+ */
+struct zcomp_params {
+ void *dict;
+ size_t dict_sz;
+ s32 level;
+ union {
+ struct deflate_params deflate;
+ };
+
+ void *drv_data;
+};
+
+/*
+ * Run-time driver context - scratch buffers, etc. It is modified during
+ * request execution (compression/decompression), cannot be shared, so
+ * it's in per-CPU area.
+ */
+struct zcomp_ctx {
+ void *context;
+};
struct zcomp_strm {
- /* The members ->buffer and ->tfm are protected by ->lock. */
- local_lock_t lock;
- /* compression/decompression buffer */
+ struct mutex lock;
+ /* compression buffer */
void *buffer;
- struct crypto_comp *tfm;
+ /* local copy of handle memory */
+ void *local_copy;
+ struct zcomp_ctx ctx;
+};
+
+struct zcomp_req {
+ const unsigned char *src;
+ const size_t src_len;
+
+ unsigned char *dst;
+ size_t dst_len;
+};
+
+struct zcomp_ops {
+ int (*compress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req);
+ int (*decompress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req);
+
+ int (*create_ctx)(struct zcomp_params *params, struct zcomp_ctx *ctx);
+ void (*destroy_ctx)(struct zcomp_ctx *ctx);
+
+ int (*setup_params)(struct zcomp_params *params);
+ void (*release_params)(struct zcomp_params *params);
+
+ const char *name;
};
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm __percpu *stream;
- const char *name;
+ const struct zcomp_ops *ops;
+ struct zcomp_params *params;
struct hlist_node node;
};
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
-ssize_t zcomp_available_show(const char *comp, char *buf);
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at);
bool zcomp_available_algorithm(const char *comp);
-struct zcomp *zcomp_create(const char *alg);
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
-void zcomp_stream_put(struct zcomp *comp);
-
-int zcomp_compress(struct zcomp_strm *zstrm,
- const void *src, unsigned int *dst_len);
+void zcomp_stream_put(struct zcomp_strm *zstrm);
-int zcomp_decompress(struct zcomp_strm *zstrm,
- const void *src, unsigned int src_len, void *dst);
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len);
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst);
#endif /* _ZCOMP_H_ */
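The zcomp_ops table above is the entire backend contract; a deliberately trivial, hypothetical memcpy backend shows the shape that every backend_*.c file in this patch follows (illustration only, never a real merge candidate):

	/* Hypothetical minimal backend, for illustration of the zcomp_ops
	 * contract only - it "compresses" by copying. Types and hooks
	 * match zcomp.h above.
	 */
	#include <linux/string.h>
	#include "zcomp.h"

	static int copy_setup_params(struct zcomp_params *params)
	{
		return 0;	/* no tunables, no dictionary */
	}

	static void copy_release_params(struct zcomp_params *params)
	{
	}

	static int copy_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
	{
		return 0;	/* stateless: no per-CPU context needed */
	}

	static void copy_destroy(struct zcomp_ctx *ctx)
	{
	}

	static int copy_compress(struct zcomp_params *params,
				 struct zcomp_ctx *ctx, struct zcomp_req *req)
	{
		memcpy(req->dst, req->src, req->src_len);
		req->dst_len = req->src_len;	/* zram treats this as "huge" */
		return 0;
	}

	static int copy_decompress(struct zcomp_params *params,
				   struct zcomp_ctx *ctx, struct zcomp_req *req)
	{
		memcpy(req->dst, req->src, req->src_len);
		return 0;
	}

	const struct zcomp_ops backend_copy = {
		.compress	= copy_compress,
		.decompress	= copy_decompress,
		.create_ctx	= copy_create,
		.destroy_ctx	= copy_destroy,
		.setup_params	= copy_setup_params,
		.release_params	= copy_release_params,
		.name		= "copy",
	};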
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4cf38f7d3e0a..5759823d6314 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>
+#include <linux/kernel_read_file.h>
#include "zram_drv.h"
@@ -43,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
+#define ZRAM_MAX_ALGO_NAME_SZ 128
+
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
@@ -54,22 +57,59 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *parent);
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index);
+
+#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
+
+static void zram_slot_lock_init(struct zram *zram, u32 index)
+{
+ static struct lock_class_key __key;
-static int zram_slot_trylock(struct zram *zram, u32 index)
+ lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock",
+ &__key, 0);
+}
+
+/*
+ * entry locking rules:
+ *
+ * 1) Lock is exclusive
+ *
+ * 2) lock() function can sleep waiting for the lock
+ *
+ * 3) Lock owner can sleep
+ *
+ * 4) Use TRY lock variant when in atomic context
+ * - must check return value and handle locking failers
+ *    - must check return value and handle locking failures
+static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
{
- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+ unsigned long *lock = &zram->table[index].flags;
+
+ if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
+ mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
+ return true;
+ }
+
+ return false;
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
+ wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_release(slot_dep_map(zram, index), _RET_IP_);
+ clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}
static inline bool init_done(struct zram *zram)
@@ -92,7 +132,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
zram->table[index].handle = handle;
}
-/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
@@ -111,17 +150,6 @@ static void zram_clear_flag(struct zram *zram, u32 index,
zram->table[index].flags &= ~BIT(flag);
}
-static inline void zram_set_element(struct zram *zram, u32 index,
- unsigned long element)
-{
- zram->table[index].element = element;
-}
-
-static unsigned long zram_get_element(struct zram *zram, u32 index)
-{
- return zram->table[index].element;
-}
-
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -142,6 +170,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
zram_test_flag(zram, index, ZRAM_WB);
}
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+ unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ if (cur_max >= pages)
+ return;
+ } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+ &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+ unsigned long alloced_pages;
+
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
+ update_used_max(zram, alloced_pages);
+
+ return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -177,23 +226,114 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
static void zram_accessed(struct zram *zram, u32 index)
{
zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = ktime_get_boottime();
#endif
}
-static inline void update_used_max(struct zram *zram,
- const unsigned long pages)
+#if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
+struct zram_pp_slot {
+ unsigned long index;
+ struct list_head entry;
+};
+
+/*
+ * A post-processing bucket is, essentially, a size class, this defines
+ * A post-processing bucket is, essentially, a size class;
+ * PP_BUCKET_SIZE_RANGE defines the range (in bytes) of pp-slot
+ * sizes within a particular bucket.
+#define PP_BUCKET_SIZE_RANGE 64
+#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
+
+struct zram_pp_ctl {
+ struct list_head pp_buckets[NUM_PP_BUCKETS];
+};
+
+static struct zram_pp_ctl *init_pp_ctl(void)
{
- unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+ struct zram_pp_ctl *ctl;
+ u32 idx;
- do {
- if (cur_max >= pages)
- return;
- } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
- &cur_max, pages));
+ ctl = kmalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return NULL;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++)
+ INIT_LIST_HEAD(&ctl->pp_buckets[idx]);
+ return ctl;
+}
+
+static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
+{
+ list_del_init(&pps->entry);
+
+ zram_slot_lock(zram, pps->index);
+ zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
+ zram_slot_unlock(zram, pps->index);
+
+ kfree(pps);
}
+static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
+{
+ u32 idx;
+
+ if (!ctl)
+ return;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++) {
+ while (!list_empty(&ctl->pp_buckets[idx])) {
+ struct zram_pp_slot *pps;
+
+ pps = list_first_entry(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ release_pp_slot(zram, pps);
+ }
+ }
+
+ kfree(ctl);
+}
+
+static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
+ u32 index)
+{
+ struct zram_pp_slot *pps;
+ u32 bid;
+
+ pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN);
+ if (!pps)
+ return false;
+
+ INIT_LIST_HEAD(&pps->entry);
+ pps->index = index;
+
+ bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+ list_add(&pps->entry, &ctl->pp_buckets[bid]);
+
+ zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+ return true;
+}
+
+static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
+{
+ struct zram_pp_slot *pps = NULL;
+ s32 idx = NUM_PP_BUCKETS - 1;
+
+	/* The higher the bucket id, the more profitable the slot is to post-process */
+ while (idx >= 0) {
+ pps = list_first_entry_or_null(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ if (pps)
+ break;
+
+ idx--;
+ }
+ return pps;
+}
+#endif
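With 4KB pages the constants above give 65 buckets at 64-byte granularity. A worked example of the bucket id that place_pp_slot() computes:

	/* Worked example, assuming PAGE_SIZE == 4096 (NUM_PP_BUCKETS == 65):
	 *   obj_size  100 -> bid  100 / 64 ==  1
	 *   obj_size 1000 -> bid 1000 / 64 == 15
	 *   obj_size 4096 -> bid 4096 / 64 == 64 (selected first)
	 * select_pp_slot() walks buckets from 64 down to 0, so the largest
	 * stored objects are post-processed first.
	 */
	static u32 example_pp_bucket(size_t obj_size)
	{
		return obj_size / PP_BUCKET_SIZE_RANGE;
	}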
+
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
@@ -233,7 +373,7 @@ static ssize_t initstate_show(struct device *dev,
val = init_done(zram);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
@@ -241,7 +381,7 @@ static ssize_t disksize_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+ return sysfs_emit(buf, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
@@ -295,19 +435,28 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
for (index = 0; index < nr_pages; index++) {
/*
- * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
- * See the comment in writeback_store.
+ * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
+ * post-processing (recompress, writeback) happens to the
+ * ZRAM_SAME slot.
+ *
+ * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
*/
zram_slot_lock(zram, index);
- if (zram_allocated(zram, index) &&
- !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
+ if (!zram_allocated(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME)) {
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- is_idle = !cutoff || ktime_after(cutoff,
- zram->table[index].ac_time);
+ is_idle = !cutoff ||
+ ktime_after(cutoff, zram->table[index].ac_time);
#endif
- if (is_idle)
- zram_set_flag(zram, index, ZRAM_IDLE);
- }
+ if (is_idle)
+ zram_set_flag(zram, index, ZRAM_IDLE);
+ else
+ zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
}
@@ -351,8 +500,31 @@ out:
}
#ifdef CONFIG_ZRAM_WRITEBACK
+#define INVALID_BDEV_BLOCK (~0UL)
+
+struct zram_wb_ctl {
+	/* idle list is accessed only by the writeback task, no concurrency */
+ struct list_head idle_reqs;
+	/* done list is accessed concurrently, protected by done_lock */
+ struct list_head done_reqs;
+ wait_queue_head_t done_wait;
+ spinlock_t done_lock;
+ atomic_t num_inflight;
+};
+
+struct zram_wb_req {
+ unsigned long blk_idx;
+ struct page *page;
+ struct zram_pp_slot *pps;
+ struct bio_vec bio_vec;
+ struct bio bio;
+
+ struct list_head entry;
+};
+
static ssize_t writeback_limit_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -361,33 +533,31 @@ static ssize_t writeback_limit_enable_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ down_write(&zram->init_lock);
zram->wb_limit_enable = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
bool val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->wb_limit_enable;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -396,29 +566,69 @@ static ssize_t writeback_limit_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ /*
+	 * When the page size is greater than 4KB, a bd_wb_limit value that
+	 * is not page-size aligned causes the limit to wrap. For example,
+	 * with a 16KB page size and bd_wb_limit set to 3, a single
+	 * writeback operation would take bd_wb_limit to -1; and since
+	 * bd_wb_limit is unsigned, it wraps to a huge value instead.
+ */
+ val = rounddown(val, PAGE_SIZE / 4096);
+
+ down_write(&zram->init_lock);
zram->bd_wb_limit = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
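The writeback accounting below charges 1UL << (PAGE_SHIFT - 12) units, i.e. one unit per 4KB, for every page written back, which is why the value is rounded down above. A worked example, assuming 16KB pages:

	/* Worked example of the rounddown above with PAGE_SIZE == 16K:
	 * each page writeback costs 1UL << (PAGE_SHIFT - 12) == 4 units,
	 * so a requested limit of 3 rounds down to 0 (unusable) and a
	 * limit of 10 rounds down to 8, i.e. exactly two page writebacks.
	 */
	static u64 example_wb_limit_round(u64 val)
	{
		return rounddown(val, PAGE_SIZE / 4096);
	}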
static ssize_t writeback_limit_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u64 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->bd_wb_limit;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t writeback_batch_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u32 val;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->wb_batch_size = val;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t writeback_batch_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->wb_batch_size;
+ up_read(&zram->init_lock);
+
+ return sysfs_emit(buf, "%u\n", val);
}
static void reset_bdev(struct zram *zram)
@@ -426,11 +636,10 @@ static void reset_bdev(struct zram *zram)
if (!zram->backing_dev)
return;
- fput(zram->bdev_file);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev_file = NULL;
+ zram->bdev = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -473,10 +682,8 @@ static ssize_t backing_dev_store(struct device *dev,
size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
- struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct file *bdev_file = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -497,15 +704,14 @@ static ssize_t backing_dev_store(struct device *dev,
if (sz > 0 && file_name[sz - 1] == '\n')
file_name[sz - 1] = 0x00;
- backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
if (IS_ERR(backing_dev)) {
err = PTR_ERR(backing_dev);
backing_dev = NULL;
goto out;
}
- mapping = backing_dev->f_mapping;
- inode = mapping->host;
+ inode = backing_dev->f_mapping->host;
/* Support only block device in this moment */
if (!S_ISBLK(inode->i_mode)) {
@@ -513,15 +719,13 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev_file = bdev_file_open_by_dev(inode->i_rdev,
- BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
- if (IS_ERR(bdev_file)) {
- err = PTR_ERR(bdev_file);
- bdev_file = NULL;
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ /* Refuse to use zero sized device (also prevents self reference) */
+ if (!nr_pages) {
+ err = -EINVAL;
goto out;
}
- nr_pages = i_size_read(inode) >> PAGE_SHIFT;
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {
@@ -531,7 +735,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev_file = bdev_file;
+ zram->bdev = I_BDEV(inode);
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -544,9 +748,6 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev_file)
- fput(bdev_file);
-
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -557,23 +758,20 @@ out:
return err;
}
-static unsigned long alloc_block_bdev(struct zram *zram)
+static unsigned long zram_reserve_bdev_block(struct zram *zram)
{
- unsigned long blk_idx = 1;
-retry:
- /* skip 0 bit to confuse zram.handle = 0 */
- blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
- if (blk_idx == zram->nr_pages)
- return 0;
+ unsigned long blk_idx;
- if (test_and_set_bit(blk_idx, zram->bitmap))
- goto retry;
+ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, 0);
+ if (blk_idx == zram->nr_pages)
+ return INVALID_BDEV_BLOCK;
+ set_bit(blk_idx, zram->bitmap);
atomic64_inc(&zram->stats.bd_count);
return blk_idx;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
{
int was_set;
@@ -587,94 +785,394 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
+static void release_wb_req(struct zram_wb_req *req)
+{
+ __free_page(req->page);
+ kfree(req);
+}
+
+static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
+{
+ if (!wb_ctl)
+ return;
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
+ /* We should never have inflight requests at this point */
+ WARN_ON(atomic_read(&wb_ctl->num_inflight));
+ WARN_ON(!list_empty(&wb_ctl->done_reqs));
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ while (!list_empty(&wb_ctl->idle_reqs)) {
+ struct zram_wb_req *req;
+
+ req = list_first_entry(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ list_del(&req->entry);
+ release_wb_req(req);
+ }
+
+ kfree(wb_ctl);
+}
+
+static struct zram_wb_ctl *init_wb_ctl(struct zram *zram)
{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- unsigned long index = 0;
- struct bio bio;
- struct bio_vec bio_vec;
- struct page *page;
- ssize_t ret = len;
- int mode, err;
- unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
+ struct zram_wb_ctl *wb_ctl;
+ int i;
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
+ wb_ctl = kmalloc(sizeof(*wb_ctl), GFP_KERNEL);
+ if (!wb_ctl)
+ return NULL;
+
+ INIT_LIST_HEAD(&wb_ctl->idle_reqs);
+ INIT_LIST_HEAD(&wb_ctl->done_reqs);
+ atomic_set(&wb_ctl->num_inflight, 0);
+ init_waitqueue_head(&wb_ctl->done_wait);
+ spin_lock_init(&wb_ctl->done_lock);
+
+ for (i = 0; i < zram->wb_batch_size; i++) {
+ struct zram_wb_req *req;
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
+ /*
+ * This is a fatal condition only if we couldn't allocate
+ * any requests at all. Otherwise we just work with the
+ * requests that we have successfully allocated, so that
+ * writeback can still proceed, even if there is only one
+ * request on the idle list.
+ */
+ req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_NOWARN);
+ if (!req)
+ break;
+
+ req->page = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!req->page) {
+ kfree(req);
+ break;
+ }
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
}
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
+ /* We couldn't allocate any requests, so writeback is not possible */
+ if (list_empty(&wb_ctl->idle_reqs))
+ goto release_wb_ctl;
+
+ return wb_ctl;
+
+release_wb_ctl:
+ release_wb_ctl(wb_ctl);
+ return NULL;
+}
+
+static void zram_account_writeback_rollback(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable)
+ zram->bd_wb_limit += 1UL << (PAGE_SHIFT - 12);
+}
+
+static void zram_account_writeback_submit(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+}
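+
+/*
+ * Note: bd_wb_limit is accounted in 4K units, hence the
+ * 1UL << (PAGE_SHIFT - 12) delta above. For example (illustrative):
+ * with 4K pages each written-back page costs one unit, while with
+ * 16K pages (PAGE_SHIFT == 14) each page costs four units.
+ */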
+
+static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
+{
+ u32 index = req->pps->index;
+ int err;
+
+ err = blk_status_to_errno(req->bio.bi_status);
+ if (err) {
+ /*
+ * Failed wb requests should not be accounted in wb_limit
+ * (if enabled).
+ */
+ zram_account_writeback_rollback(zram);
+ zram_release_bdev_block(zram, req->blk_idx);
+ return err;
}
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
+ atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
+ /*
+ * We release the slot lock during writeback, so the slot can change
+ * under us: either slot_free() alone, or slot_free() followed by
+ * zram_write_page(). In both cases the slot loses the ZRAM_PP_SLOT
+ * flag. No concurrent post-processing can set ZRAM_PP_SLOT on such
+ * slots until the current post-processing finishes.
+ */
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
+ zram_release_bdev_block(zram, req->blk_idx);
+ goto out;
}
- page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_WB);
+ zram_set_handle(zram, index, req->blk_idx);
+ atomic64_inc(&zram->stats.pages_stored);
+
+out:
+ zram_slot_unlock(zram, index);
+ return 0;
+}
+
+static void zram_writeback_endio(struct bio *bio)
+{
+ struct zram_wb_req *req = container_of(bio, struct zram_wb_req, bio);
+ struct zram_wb_ctl *wb_ctl = bio->bi_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ list_add(&req->entry, &wb_ctl->done_reqs);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ wake_up(&wb_ctl->done_wait);
+}
+
+static void zram_submit_wb_request(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl,
+ struct zram_wb_req *req)
+{
+ /*
+ * wb_limit (if enabled) should be adjusted before submission,
+ * so that we don't over-submit.
+ */
+ zram_account_writeback_submit(zram);
+ atomic_inc(&wb_ctl->num_inflight);
+ req->bio.bi_private = wb_ctl;
+ submit_bio(&req->bio);
+}
+
+static int zram_complete_done_reqs(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+ unsigned long flags;
+ int ret = 0, err;
+
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ req = list_first_entry_or_null(&wb_ctl->done_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ /* ->num_inflight > 0 doesn't mean the done list is non-empty */
+ if (!req)
+ break;
+
+ err = zram_writeback_complete(zram, req);
+ if (err)
+ ret = err;
+
+ atomic_dec(&wb_ctl->num_inflight);
+ release_pp_slot(zram, req->pps);
+ req->pps = NULL;
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
}
- for (; nr_pages != 0; index++, nr_pages--) {
- spin_lock(&zram->wb_limit_lock);
+ return ret;
+}
+
+static struct zram_wb_req *zram_select_idle_req(struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+
+ req = list_first_entry_or_null(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ return req;
+}
+
+static int zram_writeback_slots(struct zram *zram,
+ struct zram_pp_ctl *ctl,
+ struct zram_wb_ctl *wb_ctl)
+{
+ unsigned long blk_idx = INVALID_BDEV_BLOCK;
+ struct zram_wb_req *req = NULL;
+ struct zram_pp_slot *pps;
+ int ret = 0, err = 0;
+ u32 index = 0;
+
+ while ((pps = select_pp_slot(ctl))) {
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
- spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
- spin_unlock(&zram->wb_limit_lock);
- if (!blk_idx) {
- blk_idx = alloc_block_bdev(zram);
- if (!blk_idx) {
+ while (!req) {
+ req = zram_select_idle_req(wb_ctl);
+ if (req)
+ break;
+
+ wait_event(wb_ctl->done_wait,
+ !list_empty(&wb_ctl->done_reqs));
+
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ /*
+ * BIO errors are not fatal, we continue and simply
+ * attempt to writeback the remaining objects (pages).
+ * At the same time we need to signal user-space that
+ * some writes (at least one, but also could be all of
+ * them) were not successful and we do so by returning
+ * the most recent BIO error.
+ */
+ if (err)
+ ret = err;
+ }
+
+ if (blk_idx == INVALID_BDEV_BLOCK) {
+ blk_idx = zram_reserve_bdev_block(zram);
+ if (blk_idx == INVALID_BDEV_BLOCK) {
ret = -ENOSPC;
break;
}
}
+ index = pps->index;
+ zram_slot_lock(zram, index);
+ /*
+ * scan_slots() sets ZRAM_PP_SLOT and releases slot lock, so
+ * slots can change in the meantime. If slots are accessed or
+ * freed they lose ZRAM_PP_SLOT flag and hence we don't
+ * post-process them.
+ */
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
+ goto next;
+ if (zram_read_from_zspool(zram, req->page, index))
+ goto next;
+ zram_slot_unlock(zram, index);
+
+ /*
+ * From now on the pp-slot is owned by the req; remove it from
+ * its pp bucket.
+ */
+ list_del_init(&pps->entry);
+
+ req->blk_idx = blk_idx;
+ req->pps = pps;
+ bio_init(&req->bio, zram->bdev, &req->bio_vec, 1, REQ_OP_WRITE);
+ req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
+ req->bio.bi_end_io = zram_writeback_endio;
+ __bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
+
+ zram_submit_wb_request(zram, wb_ctl, req);
+ blk_idx = INVALID_BDEV_BLOCK;
+ req = NULL;
+ cond_resched();
+ continue;
+
+next:
+ zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
+ }
+
+ /*
+ * We may have selected an idle req but never submitted it, due
+ * to an error or the wb limit.
+ */
+ if (req)
+ release_wb_req(req);
+
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs));
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ if (err)
+ ret = err;
+ }
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
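+
+/*
+ * Illustrative examples of the accepted formats. The user-visible
+ * range is inclusive on both ends, while [lo, hi) is half-open
+ * internally:
+ *
+ *   page_index=42      -> lo = 42, hi = 43
+ *   page_indexes=10-20 -> lo = 10, hi = 21
+ */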
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
goto next;
if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB))
+ zram_test_flag(zram, index, ZRAM_SAME))
goto next;
if (mode & IDLE_WRITEBACK &&
@@ -687,85 +1185,129 @@ static ssize_t writeback_store(struct device *dev,
!zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
- /*
- * Clearing ZRAM_UNDER_WB is duty of caller.
- * IOW, zram_free_page never clear it.
- */
- zram_set_flag(zram, index, ZRAM_UNDER_WB);
- /* Need for hugepage writeback racing */
- zram_set_flag(zram, index, ZRAM_IDLE);
+ ok = place_pp_slot(zram, ctl, index);
+next:
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL)) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
- continue;
- }
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
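+/*
+ * Illustrative writeback trigger examples; both the legacy valueless
+ * form and the key=value form are accepted (paths assume zram0):
+ *
+ *   echo idle > /sys/block/zram0/writeback
+ *   echo type=huge_idle > /sys/block/zram0/writeback
+ *   echo "page_indexes=1-20 page_index=42" > /sys/block/zram0/writeback
+ */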
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *pp_ctl = NULL;
+ struct zram_wb_ctl *wb_ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+ u32 mode = 0;
+ int err;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
- bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
- REQ_OP_WRITE | REQ_SYNC);
- bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ pp_ctl = init_pp_ctl();
+ if (!pp_ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ wb_ctl = init_wb_ctl(zram);
+ if (!wb_ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
/*
- * XXX: A single page IO would be inefficient for write
- * but it would be not bad as starter.
+ * Workaround to support the old writeback interface.
+ *
+ * The old writeback interface has a minor inconsistency: it
+ * requires key=value only for the page_index parameter, while
+ * the writeback mode is a valueless parameter.
+ *
+ * This is no longer the case and all parameters now require
+ * values; however, we still need to support the legacy format,
+ * so we check whether a valueless parameter can be recognized
+ * as the (legacy) writeback mode.
*/
- err = submit_bio_wait(&bio);
- if (err) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
- /*
- * BIO errors are not fatal, we continue and simply
- * attempt to writeback the remaining objects (pages).
- * At the same time we need to signal user-space that
- * some writes (at least one, but also could be all of
- * them) were not successful and we do so by returning
- * the most recent BIO error.
- */
- ret = err;
- continue;
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
+ break;
}
- atomic64_inc(&zram->stats.bd_writes);
- /*
- * We released zram_slot_lock so need to check if the slot was
- * changed. If there is freeing for the slot, we can catch it
- * easily by zram_allocated.
- * A subtle case is the slot is freed/reallocated/marked as
- * ZRAM_IDLE again. To close the race, idle_store doesn't
- * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
- * Thus, we could close the race by checking ZRAM_IDLE bit.
- */
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index) ||
- !zram_test_flag(zram, index, ZRAM_IDLE)) {
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- goto next;
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
+ break;
}
- zram_free_page(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_set_flag(zram, index, ZRAM_WB);
- zram_set_element(zram, index, blk_idx);
- blk_idx = 0;
- atomic64_inc(&zram->stats.pages_stored);
- spin_lock(&zram->wb_limit_lock);
- if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
- zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
-next:
- zram_slot_unlock(zram, index);
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
+ continue;
+ }
}
- if (blk_idx)
- free_block_bdev(zram, blk_idx);
- __free_page(page);
+ err = zram_writeback_slots(zram, pp_ctl, wb_ctl);
+ if (err)
+ ret = err;
+
release_init_lock:
+ release_pp_ctl(zram, pp_ctl);
+ release_wb_ctl(wb_ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
@@ -785,7 +1327,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
+ bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
@@ -806,7 +1348,7 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
- queue_work(system_unbound_wq, &work.work);
+ queue_work(system_dfl_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
@@ -833,7 +1375,9 @@ static int read_from_bdev(struct zram *zram, struct page *page,
return -EIO;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
+{
+}
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -937,27 +1481,6 @@ static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
-/*
- * We switched to per-cpu streams and this attr is not needed anymore.
- * However, we will keep it around for some time, because:
- * a) we may revert per-cpu streams in the future
- * b) it's visible to user space and we need to follow our 2 years
- * retirement rule; but we already have a number of 'soon to be
- * altered' attrs, so max_comp_streams need to wait for the next
- * layoff cycle.
- */
-static ssize_t max_comp_streams_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
-}
-
-static ssize_t max_comp_streams_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- return len;
-}
-
static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
/* Do not free statically defined compression algorithms */
@@ -967,24 +1490,13 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
-{
- ssize_t sz;
-
- down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf);
- up_read(&zram->init_lock);
-
- return sz;
-}
-
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
size_t sz;
sz = strlen(buf);
- if (sz >= CRYPTO_MAX_ALG_NAME)
+ if (sz >= ZRAM_MAX_ALGO_NAME_SZ)
return -E2BIG;
compressor = kstrdup(buf, GFP_KERNEL);
@@ -1013,13 +1525,127 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
return 0;
}
+static void comp_params_reset(struct zram *zram, u32 prio)
+{
+ struct zcomp_params *params = &zram->params[prio];
+
+ vfree(params->dict);
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
+ params->dict_sz = 0;
+ params->dict = NULL;
+}
+
+static int comp_params_store(struct zram *zram, u32 prio, s32 level,
+ const char *dict_path,
+ struct deflate_params *deflate_params)
+{
+ ssize_t sz = 0;
+
+ comp_params_reset(zram, prio);
+
+ if (dict_path) {
+ sz = kernel_read_file_from_path(dict_path, 0,
+ &zram->params[prio].dict,
+ INT_MAX,
+ NULL,
+ READING_POLICY);
+ if (sz < 0)
+ return -EINVAL;
+ }
+
+ zram->params[prio].dict_sz = sz;
+ zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
+ return 0;
+}
+
+static ssize_t algorithm_params_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
+ char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
+ struct zram *zram = dev_to_zram(dev);
+ int ret;
+
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ if (!val || !*val)
+ return -EINVAL;
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtoint(val, 10, &prio);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "level")) {
+ ret = kstrtoint(val, 10, &level);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "algo")) {
+ algo = val;
+ continue;
+ }
+
+ if (!strcmp(param, "dict")) {
+ dict_path = val;
+ continue;
+ }
+
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
+ }
+
+ /* Lookup priority by algorithm name */
+ if (algo) {
+ s32 p;
+
+ prio = -EINVAL;
+ for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) {
+ if (!zram->comp_algs[p])
+ continue;
+
+ if (!strcmp(zram->comp_algs[p], algo)) {
+ prio = p;
+ break;
+ }
+ }
+ }
+
+ if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
+ return -EINVAL;
+
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
+ return ret ? ret : len;
+}
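+
+/*
+ * Illustrative algorithm_params usage (assumes zram0 and that zstd
+ * is among the configured algorithms):
+ *
+ *   echo "priority=0 level=9" > /sys/block/zram0/algorithm_params
+ *   echo "algo=zstd level=9 dict=/path/to/dict" > \
+ *       /sys/block/zram0/algorithm_params
+ */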
+
static ssize_t comp_algorithm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ ssize_t sz;
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+ up_read(&zram->init_lock);
+ return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1043,14 +1669,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
ssize_t sz = 0;
u32 prio;
+ down_read(&zram->init_lock);
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf + sz);
+ sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
+ sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
}
-
+ up_read(&zram->init_lock);
return sz;
}
@@ -1120,7 +1747,7 @@ static ssize_t io_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu 0 %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
@@ -1150,7 +1777,7 @@ static ssize_t mm_stat_show(struct device *dev,
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
@@ -1175,8 +1802,8 @@ static ssize_t bd_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu\n",
+ ret = sysfs_emit(buf,
+ "%8llu %8llu %8llu\n",
FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
@@ -1194,10 +1821,9 @@ static ssize_t debug_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu %8llu\n",
+ ret = sysfs_emit(buf,
+ "version: %d\n0 %8llu\n",
version,
- (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1216,17 +1842,21 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
+ if (!zram->table)
+ return;
+
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);
zs_destroy_pool(zram->mem_pool);
vfree(zram->table);
+ zram->table = NULL;
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
- size_t num_pages;
+ size_t num_pages, index;
num_pages = disksize >> PAGE_SHIFT;
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
@@ -1236,19 +1866,19 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
+
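+ /* Init the per-entry bit-locks (see ZRAM_ENTRY_LOCK / dep_map) */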
+ for (index = 0; index < num_pages; index++)
+ zram_slot_lock_init(zram, index);
+
return true;
}
-/*
- * To protect concurrent access to the same index entry,
- * caller should hold this table index entry's bit_spinlock to
- * indicate this index entry is accessing.
- */
static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
@@ -1256,22 +1886,20 @@ static void zram_free_page(struct zram *zram, size_t index)
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = 0;
#endif
- if (zram_test_flag(zram, index, ZRAM_IDLE))
- zram_clear_flag(zram, index, ZRAM_IDLE);
+
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
+ zram_set_priority(zram, index, 0);
if (zram_test_flag(zram, index, ZRAM_HUGE)) {
zram_clear_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}
- if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
-
- zram_set_priority(zram, index, 0);
-
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_element(zram, index));
+ zram_release_bdev_block(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1292,64 +1920,80 @@ static void zram_free_page(struct zram *zram, size_t index)
zs_free(zram->mem_pool, handle);
atomic64_sub(zram_get_obj_size(zram, index),
- &zram->stats.compr_data_size);
+ &zram->stats.compr_data_size);
out:
atomic64_dec(&zram->stats.pages_stored);
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
- WARN_ON_ONCE(zram->table[index].flags &
- ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
-/*
- * Reads (decompresses if needed) a page from zspool (zsmalloc).
- * Corresponding ZRAM slot should be locked.
- */
-static int zram_read_from_zspool(struct zram *zram, struct page *page,
+static int read_same_filled_page(struct zram *zram, struct page *page,
u32 index)
{
+ void *mem;
+
+ mem = kmap_local_page(page);
+ zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+ kunmap_local(mem);
+ return 0;
+}
+
+static int read_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ handle = zram_get_handle(zram, index);
+ src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
+ dst = kmap_local_page(page);
+ copy_page(dst, src);
+ kunmap_local(dst);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+
+ return 0;
+}
+
+static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
+{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
- u32 prio;
- int ret;
+ int ret, prio;
handle = zram_get_handle(zram, index);
- if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
- unsigned long value;
- void *mem;
-
- value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_local_page(page);
- zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_local(mem);
- return 0;
- }
-
size = zram_get_obj_size(zram, index);
+ prio = zram_get_priority(zram, index);
- if (size != PAGE_SIZE) {
- prio = zram_get_priority(zram, index);
- zstrm = zcomp_stream_get(zram->comps[prio]);
- }
+ zstrm = zcomp_stream_get(zram->comps[prio]);
+ src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
+ dst = kmap_local_page(page);
+ ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ kunmap_local(dst);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+ zcomp_stream_put(zstrm);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- dst = kmap_local_page(page);
- copy_page(dst, src);
- kunmap_local(dst);
- ret = 0;
- } else {
- dst = kmap_local_page(page);
- ret = zcomp_decompress(zstrm, src, size, dst);
- kunmap_local(dst);
- zcomp_stream_put(zram->comps[prio]);
- }
- zs_unmap_object(zram->mem_pool, handle);
return ret;
}
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index)
+{
+ if (zram_test_flag(zram, index, ZRAM_SAME) ||
+ !zram_get_handle(zram, index))
+ return read_same_filled_page(zram, page, index);
+
+ if (!zram_test_flag(zram, index, ZRAM_HUGE))
+ return read_compressed_page(zram, page, index);
+ else
+ return read_incompressible_page(zram, page, index);
+}
+
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
@@ -1361,14 +2005,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
+ unsigned long blk_idx = zram_get_handle(zram, index);
+
/*
* The slot should be unlocked before reading from the backing
* device.
*/
zram_slot_unlock(zram, index);
-
- ret = read_from_bdev(zram, page, zram_get_element(zram, index),
- parent);
+ ret = read_from_bdev(zram, page, blk_idx, parent);
}
/* Should NEVER happen. Return bio error if it does. */
@@ -1405,128 +2049,122 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return zram_read_page(zram, bvec->bv_page, index, bio);
}
+static int write_same_filled_page(struct zram *zram, unsigned long fill,
+ u32 index)
+{
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_SAME);
+ zram_set_handle(zram, index, fill);
+ zram_slot_unlock(zram, index);
+
+ atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
+static int write_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src;
+
+ /*
+ * This function is called from preemptible context, so we don't
+ * need to do an optimistic handle allocation with a fallback to
+ * a pessimistic one, like we do for compressible pages.
+ */
+ handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+ if (IS_ERR_VALUE(handle))
+ return PTR_ERR((void *)handle);
+
+ if (!zram_can_store_page(zram)) {
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
+ }
+
+ src = kmap_local_page(page);
+ zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
+ kunmap_local(src);
+
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, PAGE_SIZE);
+ zram_slot_unlock(zram, index);
+
+ atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.huge_pages);
+ atomic64_inc(&zram->stats.huge_pages_since);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long alloced_pages;
- unsigned long handle = -ENOMEM;
- unsigned int comp_len = 0;
- void *src, *dst, *mem;
+ unsigned long handle;
+ unsigned int comp_len;
+ void *mem;
struct zcomp_strm *zstrm;
- unsigned long element = 0;
- enum zram_pageflags flags = 0;
+ unsigned long element;
+ bool same_filled;
mem = kmap_local_page(page);
- if (page_same_filled(mem, &element)) {
- kunmap_local(mem);
- /* Free memory associated with this sector now. */
- flags = ZRAM_SAME;
- atomic64_inc(&zram->stats.same_pages);
- goto out;
- }
+ same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
+ if (same_filled)
+ return write_same_filled_page(zram, element, index);
-compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len);
- kunmap_local(src);
+ mem = kmap_local_page(page);
+ ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
+ mem, &comp_len);
+ kunmap_local(mem);
if (unlikely(ret)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
pr_err("Compression failed! err=%d\n", ret);
- zs_free(zram->mem_pool, handle);
return ret;
}
- if (comp_len >= huge_class_size)
- comp_len = PAGE_SIZE;
- /*
- * handle allocation has 2 paths:
- * a) fast path is executed with preemption disabled (for
- * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
- * since we can't sleep;
- * b) slow path enables preemption and attempts to allocate
- * the page with __GFP_DIRECT_RECLAIM bit set. we have to
- * put per-cpu compression stream and, thus, to re-do
- * the compression once handle is allocated.
- *
- * if we have a 'non-null' handle here then we are coming
- * from the slow path and handle has already been allocated.
- */
- if (IS_ERR_VALUE(handle))
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- atomic64_inc(&zram->stats.writestall);
- handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle))
- return PTR_ERR((void *)handle);
-
- if (comp_len != PAGE_SIZE)
- goto compress_again;
- /*
- * If the page is not compressible, you need to acquire the
- * lock and execute the code below. The zcomp_stream_get()
- * call is needed to disable the cpu hotplug and grab the
- * zstrm buffer back. It is necessary that the dereferencing
- * of the zstrm variable below occurs correctly.
- */
- zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+ if (comp_len >= huge_class_size) {
+ zcomp_stream_put(zstrm);
+ return write_incompressible_page(zram, page, index);
}
- alloced_pages = zs_get_total_pages(zram->mem_pool);
- update_used_max(zram, alloced_pages);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+ if (IS_ERR_VALUE(handle)) {
+ zcomp_stream_put(zstrm);
+ return PTR_ERR((void *)handle);
+ }
- if (zram->limit_pages && alloced_pages > zram->limit_pages) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ if (!zram_can_store_page(zram)) {
+ zcomp_stream_put(zstrm);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
-
- src = zstrm->buffer;
- if (comp_len == PAGE_SIZE)
- src = kmap_local_page(page);
- memcpy(dst, src, comp_len);
- if (comp_len == PAGE_SIZE)
- kunmap_local(src);
+ zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len);
+ zcomp_stream_put(zstrm);
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- zs_unmap_object(zram->mem_pool, handle);
- atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
- /*
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
zram_slot_lock(zram, index);
zram_free_page(zram, index);
-
- if (comp_len == PAGE_SIZE) {
- zram_set_flag(zram, index, ZRAM_HUGE);
- atomic64_inc(&zram->stats.huge_pages);
- atomic64_inc(&zram->stats.huge_pages_since);
- }
-
- if (flags) {
- zram_set_flag(zram, index, flags);
- zram_set_element(zram, index, element);
- } else {
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
- }
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+
return ret;
}
@@ -1560,6 +2198,49 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
#ifdef CONFIG_ZRAM_MULTI_COMP
+#define RECOMPRESS_IDLE (1 << 0)
+#define RECOMPRESS_HUGE (1 << 1)
+
+static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
+ struct zram_pp_ctl *ctl)
+{
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long index;
+
+ for (index = 0; index < nr_pages; index++) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (mode & RECOMPRESS_IDLE &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+
+ if (mode & RECOMPRESS_HUGE &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME) ||
+ zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ /* Already compressed with the same or a higher priority */
+ if (zram_get_priority(zram, index) + 1 >= prio_max)
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ }
+
+ return 0;
+}
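+
+/*
+ * Illustrative recompress trigger examples (assumes zram0; parameters
+ * can be combined):
+ *
+ *   echo "type=idle" > /sys/block/zram0/recompress
+ *   echo "type=huge threshold=888" > /sys/block/zram0/recompress
+ *   echo "algo=zstd" > /sys/block/zram0/recompress
+ */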
+
/*
* This function will decompress (unless it's ZRAM_HUGE) the page and then
* attempt to compress it using provided compression algorithm priority
@@ -1567,7 +2248,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
*
* Corresponding ZRAM slot should be locked.
*/
-static int zram_recompress(struct zram *zram, u32 index, struct page *page,
+static int recompress_slot(struct zram *zram, u32 index, struct page *page,
u64 *num_recomp_pages, u32 threshold, u32 prio,
u32 prio_max)
{
@@ -1578,9 +2259,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
unsigned int comp_len_new;
unsigned int class_index_old;
unsigned int class_index_new;
- u32 num_recomps = 0;
- void *src, *dst;
- int ret;
+ void *src;
+ int ret = 0;
handle_old = zram_get_handle(zram, index);
if (!handle_old)
@@ -1597,7 +2277,24 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (ret)
return ret;
+ /*
+ * We touched this entry so mark it as non-IDLE. This makes sure
+ * that we don't preserve the IDLE flag and don't incorrectly pick
+ * this entry for a different post-processing type (e.g. writeback).
+ */
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
+
+ prio = max(prio, zram_get_priority(zram, index) + 1);
+ /*
+ * The recompression slot scan should not select slots that are
+ * already compressed with a higher priority algorithm, but
+ * check just in case.
+ */
+ if (prio >= prio_max)
+ return 0;
+
/*
* Iterate the secondary comp algorithms list (in order of priority)
* and try to recompress the page.
@@ -1606,22 +2303,16 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (!zram->comps[prio])
continue;
- /*
- * Skip if the object is already re-compressed with a higher
- * priority algorithm (or same algorithm).
- */
- if (prio <= zram_get_priority(zram, index))
- continue;
-
- num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len_new);
+ ret = zcomp_compress(zram->comps[prio], zstrm,
+ src, &comp_len_new);
kunmap_local(src);
if (ret) {
- zcomp_stream_put(zram->comps[prio]);
- return ret;
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
+ break;
}
class_index_new = zs_lookup_class_index(zram->mem_pool,
@@ -1630,7 +2321,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
/* Continue until we make progress */
if (class_index_new >= class_index_old ||
(threshold && comp_len_new >= threshold)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
continue;
}
@@ -1639,14 +2331,6 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
}
/*
- * We did not try to recompress, e.g. when we have only one
- * secondary algorithm and the page is already recompressed
- * using that algorithm
- */
- if (!zstrm)
- return 0;
-
- /*
* Decrement the limit (if set) on pages we can recompress, even
* when current recompression was unsuccessful or did not compress
* the page below the threshold, because we still spent resources
@@ -1655,48 +2339,44 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (*num_recomp_pages)
*num_recomp_pages -= 1;
- if (class_index_new >= class_index_old) {
+ /* Compression error */
+ if (ret)
+ return ret;
+
+ if (!zstrm) {
/*
* Secondary algorithms failed to re-compress the page
- * in a way that would save memory, mark the object as
- * incompressible so that we will not try to compress
- * it again.
+ * in a way that would save memory.
*
- * We need to make sure that all secondary algorithms have
- * failed, so we test if the number of recompressions matches
- * the number of active secondary algorithms.
+ * Mark the object incompressible if the max-priority
+ * algorithm couldn't re-compress it.
*/
- if (num_recomps == zram->num_active_comps - 1)
- zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ if (prio < zram->num_active_comps)
+ return 0;
+ zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
return 0;
}
- /* Successful recompression but above threshold */
- if (threshold && comp_len_new >= threshold)
- return 0;
-
/*
- * No direct reclaim (slow path) for handle allocation and no
- * re-compression attempt (unlike in zram_write_bvec()) since
- * we already have stored that object in zsmalloc. If we cannot
- * alloc memory for recompressed object then we bail out and
- * simply keep the old (existing) object in zsmalloc.
+ * We are holding the per-CPU stream mutex and the entry lock, so
+ * we'd better avoid direct reclaim. An allocation error is not
+ * fatal since we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that
+ * holds the original compressed data. But that would require us
+ * to modify the zsmalloc API to return this information. For now,
+ * we make do with the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
}
- dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
- memcpy(dst, zstrm->buffer, comp_len_new);
- zcomp_stream_put(zram->comps[prio]);
-
- zs_unmap_object(zram->mem_pool, handle_new);
+ zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
+ zcomp_stream_put(zstrm);
zram_free_page(zram, index);
zram_set_handle(zram, index, handle_new);
@@ -1709,23 +2389,23 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
return 0;
}
-#define RECOMPRESS_IDLE (1 << 0)
-#define RECOMPRESS_HUGE (1 << 1)
-
static ssize_t recompress_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
char *args, *param, *val, *algo = NULL;
u64 num_recomp_pages = ULLONG_MAX;
+ struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_slot *pps;
u32 mode = 0, threshold = 0;
- unsigned long index;
- struct page *page;
+ u32 prio, prio_max;
+ struct page *page = NULL;
ssize_t ret;
+ prio = ZRAM_SECONDARY_COMP;
+ prio_max = zram->num_active_comps;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1769,6 +2449,18 @@ static ssize_t recompress_store(struct device *dev,
algo = val;
continue;
}
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtouint(val, 10, &prio);
+ if (ret)
+ return ret;
+
+ if (prio == ZRAM_PRIMARY_COMP)
+ prio = ZRAM_SECONDARY_COMP;
+
+ prio_max = prio + 1;
+ continue;
+ }
}
if (threshold >= huge_class_size)
@@ -1780,6 +2472,12 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
if (algo) {
bool found = false;
@@ -1788,7 +2486,7 @@ static ssize_t recompress_store(struct device *dev,
continue;
if (!strcmp(zram->comp_algs[prio], algo)) {
- prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ prio_max = prio + 1;
found = true;
break;
}
@@ -1800,42 +2498,44 @@ static ssize_t recompress_store(struct device *dev,
}
}
+ prio_max = min(prio_max, (u32)zram->num_active_comps);
+ if (prio >= prio_max) {
+ ret = -EINVAL;
+ goto release_init_lock;
+ }
+
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto release_init_lock;
}
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_recompress(zram, mode, prio_max, ctl);
+
ret = len;
- for (index = 0; index < nr_pages; index++) {
+ while ((pps = select_pp_slot(ctl))) {
int err = 0;
if (!num_recomp_pages)
break;
- zram_slot_lock(zram, index);
-
- if (!zram_allocated(zram, index))
- goto next;
-
- if (mode & RECOMPRESS_IDLE &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
-
- if (mode & RECOMPRESS_HUGE &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
+ zram_slot_lock(zram, pps->index);
+ if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
goto next;
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
- err = zram_recompress(zram, index, page, &num_recomp_pages,
- threshold, prio, prio_max);
+ err = recompress_slot(zram, pps->index, page,
+ &num_recomp_pages, threshold,
+ prio, prio_max);
next:
- zram_slot_unlock(zram, index);
+ zram_slot_unlock(zram, pps->index);
+ release_pp_slot(zram, pps);
+
if (err) {
ret = err;
break;
@@ -1844,9 +2544,11 @@ next:
cond_resched();
}
- __free_page(page);
-
release_init_lock:
+ if (page)
+ __free_page(page);
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
}
@@ -1991,11 +2693,20 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram_slot_unlock(zram, index);
}
+static void zram_comp_params_reset(struct zram *zram)
+{
+ u32 prio;
+
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ comp_params_reset(zram, prio);
+ }
+}
+
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
struct zcomp *comp = zram->comps[prio];
zram->comps[prio] = NULL;
@@ -2004,6 +2715,15 @@ static void zram_destroy_comps(struct zram *zram)
zcomp_destroy(comp);
zram->num_active_comps--;
}
+
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ /* Do not free statically defined compression algorithms */
+ if (zram->comp_algs[prio] != default_compressor)
+ kfree(zram->comp_algs[prio]);
+ zram->comp_algs[prio] = NULL;
+ }
+
+ zram_comp_params_reset(zram);
}
static void zram_reset_device(struct zram *zram)
@@ -2012,11 +2732,6 @@ static void zram_reset_device(struct zram *zram)
zram->limit_pages = 0;
- if (!init_done(zram)) {
- up_write(&zram->init_lock);
- return;
- }
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
@@ -2025,6 +2740,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
+ atomic_set(&zram->pp_in_progress, 0);
reset_bdev(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
@@ -2057,11 +2773,12 @@ static ssize_t disksize_store(struct device *dev,
goto out_unlock;
}
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
- comp = zcomp_create(zram->comp_algs[prio]);
+ comp = zcomp_create(zram->comp_algs[prio],
+ &zram->params[prio]);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->comp_algs[prio]);
@@ -2152,18 +2869,19 @@ static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
-static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
+static DEVICE_ATTR_RW(writeback_batch_size);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif
+static DEVICE_ATTR_WO(algorithm_params);
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -2173,13 +2891,13 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
&dev_attr_idle.attr,
- &dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
+ &dev_attr_writeback_batch_size.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
@@ -2191,6 +2909,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_recomp_algorithm.attr,
&dev_attr_recompress.attr,
#endif
+ &dev_attr_algorithm_params.attr,
NULL,
};
@@ -2223,6 +2942,8 @@ static int zram_add(void)
#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
.max_write_zeroes_sectors = UINT_MAX,
#endif
+ .features = BLK_FEAT_STABLE_WRITES |
+ BLK_FEAT_SYNCHRONOUS,
};
struct zram *zram;
int ret, device_id;
@@ -2238,7 +2959,7 @@ static int zram_add(void)
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
- spin_lock_init(&zram->wb_limit_lock);
+ zram->wb_batch_size = 32;
#endif
/* gendisk structure */
@@ -2257,19 +2978,16 @@ static int zram_add(void)
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ atomic_set(&zram->pp_in_progress, 0);
+ zram_comp_params_reset(zram);
+ comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
- /* zram devices sort of resembles non-rotational disks */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
- comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
-
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
@@ -2351,7 +3069,7 @@ static ssize_t hot_add_show(const struct class *class,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
@@ -2418,9 +3136,10 @@ static void destroy_devices(void)
static int __init zram_init(void)
{
+ struct zram_table_entry zram_te;
int ret;
- BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 37bf29f34d26..c6d94501376c 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -17,7 +17,6 @@
#include <linux/rwsem.h>
#include <linux/zsmalloc.h>
-#include <linux/crypto.h>
#include "zcomp.h"
@@ -28,7 +27,6 @@
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-
/*
* ZRAM is mainly used for memory efficiency so we want to keep memory
* footprint small and thus squeeze size and zram pageflags into a flags
@@ -45,11 +43,10 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
- /* zram slot is locked */
- ZRAM_LOCK = ZRAM_FLAG_SHIFT,
- ZRAM_SAME, /* Page consists the same element */
+ ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists of the same element */
+ ZRAM_ENTRY_LOCK, /* entry access lock bit */
ZRAM_WB, /* page is stored on backing_device */
- ZRAM_UNDER_WB, /* page is under writeback */
+ ZRAM_PP_SLOT, /* Selected for post-processing */
ZRAM_HUGE, /* Incompressible page */
ZRAM_IDLE, /* not accessed page since last idle marking */
ZRAM_INCOMPRESSIBLE, /* none of the algorithms could compress it */
@@ -60,18 +57,19 @@ enum zram_pageflags {
__NR_ZRAM_PAGEFLAGS,
};
-/*-- Data structures */
-
-/* Allocated for each disk page */
+/*
+ * Allocated for each disk page. We use a bit-lock (the
+ * ZRAM_ENTRY_LOCK bit of flags) to save memory: there can be a
+ * large number of entries, and standard locking primitives (e.g. a
+ * mutex) would significantly increase the sizeof() of each entry
+ * and hence of the meta table.
+ */
struct zram_table_entry {
- union {
- unsigned long handle;
- unsigned long element;
- };
+ unsigned long handle;
unsigned long flags;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
+ struct lockdep_map dep_map;
};
struct zram_stats {
@@ -84,7 +82,6 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
- atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
@@ -107,6 +104,7 @@ struct zram {
struct zram_table_entry *table;
struct zs_pool *mem_pool;
struct zcomp *comps[ZRAM_MAX_COMPS];
+ struct zcomp_params params[ZRAM_MAX_COMPS];
struct gendisk *disk;
/* Prevent concurrent execution of device init */
struct rw_semaphore init_lock;
@@ -129,15 +127,16 @@ struct zram {
bool claim; /* Protected by disk->open_mutex */
#ifdef CONFIG_ZRAM_WRITEBACK
struct file *backing_dev;
- spinlock_t wb_limit_lock;
bool wb_limit_enable;
+ u32 wb_batch_size;
u64 bd_wb_limit;
- struct file *bdev_file;
+ struct block_device *bdev;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;
#endif
+ atomic_t pp_in_progress;
};
#endif
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 0b5f218ac505..c5d45cf91f88 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -105,6 +105,7 @@ config BT_HCIUART
tristate "HCI UART driver"
depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
depends on NVMEM || !NVMEM
+ depends on POWER_SEQUENCING || !POWER_SEQUENCING
depends on TTY
help
Bluetooth HCI UART driver.
@@ -187,6 +188,7 @@ config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ select CRC_CCITT
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI
@@ -273,6 +275,18 @@ config BT_HCIUART_MRVL
Say Y here to compile support for HCI MRVL protocol.
+config BT_HCIUART_AML
+ bool "Amlogic protocol support"
+ depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
+ select BT_HCIUART_H4
+ select FW_LOADER
+ help
+ The Amlogic protocol support enables Bluetooth HCI over serial
+ port interface for Amlogic Bluetooth controllers.
+
+ Say Y here to compile support for HCI AML protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
@@ -287,19 +301,21 @@ config BT_HCIBCM203X
config BT_HCIBCM4377
- tristate "HCI BCM4377/4378/4387 PCIe driver"
+ tristate "HCI BCM4377/4378/4387/4388 PCIe driver"
depends on PCI
select FW_LOADER
help
- Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via
- PCIe. These are usually found in Apple machines.
+ Support for Broadcom BCM4377/4378/4387/4388 Bluetooth chipsets
+ attached via PCIe. These are usually found in Apple machines.
Say Y here to compile support for HCI BCM4377 family devices into the
kernel or say M to compile it as module (hci_bcm4377).
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
+ depends on BT_HCIUART
depends on USB
+ select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -323,7 +339,7 @@ config BT_HCIBFUSB
config BT_HCIDTL1
tristate "HCI DTL1 (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Bluetooth HCI DTL1 (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
@@ -336,7 +352,7 @@ config BT_HCIDTL1
config BT_HCIBT3C
tristate "HCI BT3C (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select FW_LOADER
help
Bluetooth HCI BT3C (PC Card) driver.
@@ -350,7 +366,7 @@ config BT_HCIBT3C
config BT_HCIBLUECARD
tristate "HCI BlueCard (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Bluetooth HCI BlueCard (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
@@ -412,6 +428,7 @@ config BT_ATH3K
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
+ depends on USB || !BT_HCIBTUSB_MTK
select BT_MTK
help
MediaTek Bluetooth HCI SDIO driver.
@@ -423,7 +440,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ depends on USB || !BT_HCIBTUSB_MTK
+ select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@@ -468,7 +488,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ select BT_HCIUART_H4
select CRC32
select CRC8
help
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 0730d6684d1a..81856512ddd0 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -51,4 +51,5 @@ hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
+hci_uart-$(CONFIG_BT_HCIUART_AML) += hci_aml.o
hci_uart-objs := $(hci_uart-y)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ce97b336fbfb..fc796f1dbda9 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -11,7 +11,7 @@
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/usb.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#define VERSION "1.0"
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index cab93935cc7f..8df310983bf6 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -365,9 +365,8 @@ static void bfusb_rx_complete(struct urb *urb)
buf += 3;
}
- if (count < len) {
+ if (count < len)
bt_dev_err(data->hdev, "block extends over URB buffer ranges");
- }
if ((hdr & 0xe1) == 0xc1)
bfusb_recv_block(data, hdr, buf, len);
@@ -671,7 +670,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
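A recurring change throughout this patch replaces open-coded set_bit() calls on hdev->quirks with an hci_set_quirk() accessor. A minimal sketch of the accessor pattern, assuming the core keeps quirks in a dedicated bitmap behind helpers (struct, field, and helper names below are illustrative, not the real hci_core.h definitions):

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Hypothetical stand-in for the real struct hci_dev */
    struct example_hdev {
    	DECLARE_BITMAP(quirk_flags, 64);	/* width is an assumption */
    };

    /* Accessors keep drivers from reaching into the bitmap directly */
    static inline void example_set_quirk(struct example_hdev *hdev, int nr)
    {
    	set_bit(nr, hdev->quirk_flags);
    }

    static inline bool example_test_quirk(struct example_hdev *hdev, int nr)
    {
    	return test_bit(nr, hdev->quirk_flags);
    }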
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 36eabf61717f..1e3a56e9b139 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -158,7 +158,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
static void bluecard_activity_led_timeout(struct timer_list *t)
{
- struct bluecard_info *info = from_timer(info, t, timer);
+ struct bluecard_info *info = timer_container_of(info, t, timer);
unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
@@ -638,7 +638,7 @@ static int bluecard_hci_close(struct hci_dev *hdev)
bluecard_hci_flush(hdev);
/* Stop LED timer */
- del_timer_sync(&(info->timer));
+ timer_delete_sync(&(info->timer));
/* Disable power LED */
outb(0x00, iobase + 0x30);
@@ -885,7 +885,7 @@ static void bluecard_release(struct pcmcia_device *link)
bluecard_close(info);
- del_timer_sync(&(info->timer));
+ timer_delete_sync(&(info->timer));
pcmcia_disable_device(link);
}
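The from_timer() to timer_container_of() and del_timer_sync() to timer_delete_sync() changes above are mechanical renames of the same underlying pattern. A self-contained sketch of what the callback-side helper presumably resolves to (type and field names are illustrative):

    #include <linux/container_of.h>
    #include <linux/timer.h>

    struct example_info {
    	struct timer_list timer;
    	unsigned long hw_state;
    };

    static void example_led_timeout(struct timer_list *t)
    {
    	/* Recover the enclosing structure from the timer_list pointer;
    	 * this is what timer_container_of(info, t, timer) expands to.
    	 */
    	struct example_info *info = container_of(t, struct example_info, timer);

    	(void)info;	/* ... drive the LED from info->hw_state ... */
    }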
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1fa58c059cbf..e305d04aac9d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -20,7 +20,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define VERSION "0.11"
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
+ struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
- data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+ data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
+ data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -398,7 +400,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hdev->send = bpa10x_send_frame;
hdev->set_diag = bpa10x_set_diag;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index f9a7c790d7e2..d33cc70eec66 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -12,7 +12,7 @@
#include <linux/dmi.h>
#include <linux/of.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -135,7 +135,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -467,7 +467,7 @@ static int btbcm_print_controller_features(struct hci_dev *hdev)
/* Read DMI and disable broken Read LE Min/Max Tx Power */
if (dmi_first_match(disable_broken_read_transmit_power))
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
return 0;
}
@@ -541,11 +541,10 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = {
static const char *btbcm_get_board_name(struct device *dev)
{
#ifdef CONFIG_OF
- struct device_node *root;
+ struct device_node *root __free(device_node) = of_find_node_by_path("/");
char *board_type;
const char *tmp;
- root = of_find_node_by_path("/");
if (!root)
return NULL;
@@ -554,8 +553,10 @@ static const char *btbcm_get_board_name(struct device *dev)
/* get rid of any '/' in the compatible string */
board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type)
+ return NULL;
+
strreplace(board_type, '/', '-');
- of_node_put(root);
return board_type;
#else
@@ -641,7 +642,9 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud
snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid);
}
- fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL);
+ fw_name = kmalloc_array(BCM_FW_NAME_COUNT_MAX,
+ sizeof(*fw_name),
+ GFP_KERNEL);
if (!fw_name)
return -ENOMEM;
@@ -705,7 +708,7 @@ int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_m
btbcm_check_bdaddr(hdev);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
@@ -768,7 +771,7 @@ int btbcm_setup_apple(struct hci_dev *hdev)
kfree_skb(skb);
}
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 0c855c3ee1c1..9d29ab811f80 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,9 +9,11 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/efi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -26,20 +28,17 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
-#define BTINTEL_PPAG_NAME "PPAG"
+#define BTINTEL_EFI_DSBR L"UefiCnvCommonDSBR"
enum {
DSM_SET_WDISABLE2_DELAY = 1,
DSM_SET_RESET_METHOD = 3,
};
-/* structure to store the PPAG data read from ACPI table */
-struct btintel_ppag {
- u32 domain;
- u32 mode;
- acpi_status status;
- struct hci_dev *hdev;
-};
+#define BTINTEL_BT_DOMAIN 0x12
+#define BTINTEL_SAR_LEGACY 0
+#define BTINTEL_SAR_INC_PWR 1
+#define BTINTEL_SAR_INC_PWR_SUPPORTED 0
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
@@ -89,7 +88,7 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
bt_dev_err(hdev, "Found Intel default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
kfree_skb(skb);
@@ -482,7 +481,10 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x19: /* Slr-F */
case 0x1b: /* Mgr */
case 0x1c: /* Gale Peak (GaP) */
+ case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
+ case 0x1f: /* Scorpious Peak */
+ case 0x22: /* BlazarIW (BzrIW) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -512,13 +514,13 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
bt_dev_info(hdev, "Secure boot is %s",
- version->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(version->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- version->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- version->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- version->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
version->min_fw_build_nn, version->min_fw_build_cw,
2000 + version->min_fw_build_yy);
@@ -554,7 +556,7 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
/* Consume Command Complete Status field */
skb_pull(skb, 1);
- /* Event parameters contatin multiple TLVs. Read each of them
+ /* Event parameters contain multiple TLVs. Read each of them
	 * and only keep the required data. Also, it uses the existing legacy
* version field like hw_platform, hw_variant, and fw_variant
* to keep the existing setup flow
@@ -641,6 +643,10 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
case INTEL_TLV_GIT_SHA1:
version->git_sha1 = get_unaligned_le32(tlv->val);
break;
+ case INTEL_TLV_FW_ID:
+ snprintf(version->fw_id, sizeof(version->fw_id),
+ "%s", tlv->val);
+ break;
default:
/* Ignore rest of information */
break;
@@ -884,7 +890,7 @@ int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
params.boot_param = cpu_to_le32(boot_param);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params), &params,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to send Intel Reset command");
@@ -929,16 +935,16 @@ int btintel_read_boot_params(struct hci_dev *hdev,
le16_to_cpu(params->dev_revid));
bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(params->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
params->min_fw_build_nn, params->min_fw_build_cw,
@@ -1042,7 +1048,7 @@ static int btintel_download_firmware_payload(struct hci_dev *hdev,
* as needed.
*
* Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
+ * firmware data buffer as a single Data fragment.
*/
if (!(frag_len % 4)) {
err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
@@ -1254,6 +1260,12 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
struct intel_reset params;
struct sk_buff *skb;
+ /* PCIe transport uses shared hardware reset mechanism for recovery
+ * which gets triggered in pcie *setup* function on error.
+ */
+ if (hdev->bus == HCI_PCI)
+ return;
+
/* Send Intel Reset command. This will result in
* re-enumeration of BT controller.
*
@@ -1269,13 +1281,14 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
* boot_param: Boot address
*
*/
+
params.reset_type = 0x01;
params.patch_enable = 0x01;
params.ddc_reload = 0x01;
params.boot_option = 0x00;
params.boot_param = cpu_to_le32(0x00000000);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params),
&params, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "FW download error recovery failed (%ld)",
@@ -1324,65 +1337,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
return 0;
}
-static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
- void **ret)
-{
- acpi_status status;
- size_t len;
- struct btintel_ppag *ppag = data;
- union acpi_object *p, *elements;
- struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- struct hci_dev *hdev = ppag->hdev;
-
- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- if (ACPI_FAILURE(status)) {
- bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
- return status;
- }
-
- len = strlen(string.pointer);
- if (len < strlen(BTINTEL_PPAG_NAME)) {
- kfree(string.pointer);
- return AE_OK;
- }
-
- if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
- kfree(string.pointer);
- return AE_OK;
- }
- kfree(string.pointer);
-
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- ppag->status = status;
- bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
- return status;
- }
-
- p = buffer.pointer;
- ppag = (struct btintel_ppag *)data;
-
- if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
- kfree(buffer.pointer);
- bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
- p->type, p->package.count);
- ppag->status = AE_ERROR;
- return AE_ERROR;
- }
-
- elements = p->package.elements;
-
- /* PPAG table is located at element[1] */
- p = &elements[1];
-
- ppag->domain = (u32)p->package.elements[0].integer.value;
- ppag->mode = (u32)p->package.elements[1].integer.value;
- ppag->status = AE_OK;
- kfree(buffer.pointer);
- return AE_CTRL_TERMINATE;
-}
-
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
@@ -1902,6 +1856,37 @@ static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
return 0;
}
+static int btintel_boot_wait_d0(struct hci_dev *hdev, ktime_t calltime,
+ int msec)
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ bt_dev_info(hdev, "Waiting for device transition to d0");
+
+ err = btintel_wait_on_flag_timeout(hdev, INTEL_WAIT_FOR_D0,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(msec));
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Device d0 move interrupted");
+ return -EINTR;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Device d0 move timeout");
+ return -ETIMEDOUT;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device moved to D0 in %llu usecs", duration);
+
+ return 0;
+}
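The duration computation above uses a right shift by 10 rather than a division by 1000, so the value logged as "usecs" is an approximation (it divides by 1024, about 2.4% low). A small userspace sketch of the difference:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t ns = 1500000;	/* hypothetical 1.5 ms D0 transition */

    	/* mirrors "ktime_to_ns(delta) >> 10" in the driver */
    	printf("approx usecs: %llu\n", (unsigned long long)(ns >> 10));	/* 1464 */
    	printf("exact usecs:  %llu\n", (unsigned long long)(ns / 1000));	/* 1500 */
    	return 0;
    }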
+
static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
{
ktime_t calltime;
@@ -1910,6 +1895,7 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
calltime = ktime_get();
btintel_set_flag(hdev, INTEL_BOOTING);
+ btintel_set_flag(hdev, INTEL_WAIT_FOR_D0);
err = btintel_send_intel_reset(hdev, boot_addr);
if (err) {
@@ -1922,13 +1908,28 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
* is done by the operational firmware sending bootup notification.
*
* Booting into operational firmware should not take longer than
- * 1 second. However if that happens, then just fail the setup
+	 * 5 seconds. However, if that happens, just fail the setup
* since something went wrong.
*/
- err = btintel_boot_wait(hdev, calltime, 1000);
- if (err == -ETIMEDOUT)
+ err = btintel_boot_wait(hdev, calltime, 5000);
+ if (err == -ETIMEDOUT) {
btintel_reset_to_bootloader(hdev);
+ goto exit_error;
+ }
+ if (hdev->bus == HCI_PCI) {
+ /* In case of PCIe, after receiving bootup event, driver performs
+ * D0 entry by writing 0 to sleep control register (check
+		 * btintel_pcie_recv_event()).
+		 * Firmware acks with an alive interrupt indicating the host is
+		 * fully ready to perform BT operations. Let's wait here until
+		 * the INTEL_WAIT_FOR_D0 bit is cleared.
+ */
+ calltime = ktime_get();
+ err = btintel_boot_wait_d0(hdev, calltime, 2000);
+ }
+
+exit_error:
return err;
}
@@ -2027,7 +2028,7 @@ static int btintel_download_fw(struct hci_dev *hdev,
*/
if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
download:
@@ -2202,30 +2203,61 @@ static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver,
const char *suffix)
{
const char *format;
- /* The firmware file name for new generation controllers will be
- * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
- */
- switch (ver->cnvi_top & 0xfff) {
+ u32 cnvi, cnvr;
+
+ cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top));
+
+ cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvr_top));
+
/* Only Blazar product supports downloading of intermediate loader
* image
*/
- case BTINTEL_CNVI_BLAZARI:
- if (ver->img_type == BTINTEL_IMG_BOOTLOADER)
+ if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) {
+ u8 zero[BTINTEL_FWID_MAXLEN];
+
+ if (ver->img_type == BTINTEL_IMG_BOOTLOADER) {
format = "intel/ibt-%04x-%04x-iml.%s";
- else
- format = "intel/ibt-%04x-%04x.%s";
- break;
- default:
- format = "intel/ibt-%04x-%04x.%s";
- break;
+ snprintf(fw_name, len, format, cnvi, cnvr, suffix);
+ return;
+ }
+
+ memset(zero, 0, sizeof(zero));
+
+ /* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step-fw_id> */
+ if (memcmp(ver->fw_id, zero, sizeof(zero))) {
+ format = "intel/ibt-%04x-%04x-%s.%s";
+ snprintf(fw_name, len, format, cnvi, cnvr,
+ ver->fw_id, suffix);
+ return;
+ }
+		/* If the firmware id is not present, fall back to the legacy
+		 * naming convention
+ */
}
+	/* Fall back to the legacy naming convention for other controllers:
+ * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
+ */
+ format = "intel/ibt-%04x-%04x.%s";
+ snprintf(fw_name, len, format, cnvi, cnvr, suffix);
+}
+
+static void btintel_get_iml_tlv(const struct intel_version_tlv *ver,
+ char *fw_name, size_t len,
+ const char *suffix)
+{
+ const char *format;
+ u32 cnvi, cnvr;
+
+ cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top));
+
+ cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvr_top));
- snprintf(fw_name, len, format,
- INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
- INTEL_CNVX_TOP_STEP(ver->cnvi_top)),
- INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
- INTEL_CNVX_TOP_STEP(ver->cnvr_top)),
- suffix);
+ format = "intel/ibt-%04x-%04x-iml.%s";
+ snprintf(fw_name, len, format, cnvi, cnvr, suffix);
}
static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
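For illustration, here is how the legacy "intel/ibt-%04x-%04x.%s" name falls out of the CNVi/CNVr packing, assuming INTEL_CNVX_TOP_PACK_SWAB(type, step) packs ((type << 4) | step) into 16 bits and byte-swaps it (the exact macro lives in btintel.h; the packing shown here is an assumption, and the top values are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t pack_swab(uint16_t type, uint16_t step)
    {
    	uint16_t v = (uint16_t)((type << 4) | step);

    	return (uint16_t)((v << 8) | (v >> 8));	/* __swab16() equivalent */
    }

    int main(void)
    {
    	char name[64];

    	/* hypothetical CNVi type 0x900 (BlazarI) and CNVr type 0x910, step 0 */
    	snprintf(name, sizeof(name), "intel/ibt-%04x-%04x.%s",
    		 (unsigned int)pack_swab(0x900, 0),
    		 (unsigned int)pack_swab(0x910, 0), "sfi");
    	printf("%s\n", name);	/* intel/ibt-0090-0091.sfi */
    	return 0;
    }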
@@ -2233,7 +2265,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
u32 *boot_param)
{
const struct firmware *fw;
- char fwname[64];
+ char fwname[128];
int err;
ktime_t calltime;
@@ -2264,11 +2296,24 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
*/
if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
- btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
+ if (ver->img_type == BTINTEL_IMG_OP) {
+ /* Controller running OP image. In case of FW downgrade,
+		 * the FWID TLV may not be present and the driver may attempt to
+		 * load a firmware image which doesn't exist. Let's compare the
+		 * version of the IML image instead.
+ */
+ if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e)
+ btintel_get_iml_tlv(ver, fwname, sizeof(fwname), "sfi");
+ else
+ btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
+ } else {
+ btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
+ }
+
err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
@@ -2427,10 +2472,13 @@ error:
static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
{
- struct btintel_ppag ppag;
struct sk_buff *skb;
struct hci_ppag_enable_cmd ppag_cmd;
acpi_handle handle;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *p, *elements;
+ u32 domain, mode;
+ acpi_status status;
/* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
switch (ver->cnvr_top & 0xFFF) {
@@ -2448,22 +2496,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
return;
}
- memset(&ppag, 0, sizeof(ppag));
-
- ppag.hdev = hdev;
- ppag.status = AE_NOT_FOUND;
- acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
- btintel_ppag_callback, &ppag, NULL);
-
- if (ACPI_FAILURE(ppag.status)) {
- if (ppag.status == AE_NOT_FOUND) {
+ status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
return;
}
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
+ return;
+ }
+
+ p = buffer.pointer;
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
+ bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ kfree(buffer.pointer);
return;
}
- if (ppag.domain != 0x12) {
+ elements = p->package.elements;
+
+ /* PPAG table is located at element[1] */
+ p = &elements[1];
+
+ domain = (u32)p->package.elements[0].integer.value;
+ mode = (u32)p->package.elements[1].integer.value;
+ kfree(buffer.pointer);
+
+ if (domain != 0x12) {
bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware");
return;
}
@@ -2474,19 +2534,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
* BIT 1 : 0 Disabled in China
* 1 Enabled in China
*/
- if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) {
- bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS");
+ mode &= 0x03;
+
+ if (!mode) {
+		bt_dev_dbg(hdev, "PPAG-BT: EU and China modes are disabled in BIOS");
return;
}
- ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode);
+ ppag_cmd.ppag_enable_flags = cpu_to_le32(mode);
- skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd),
+ &ppag_cmd, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
return;
}
- bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode);
+ bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode);
kfree_skb(skb);
}
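A worked example of the mode masking above, where only bit 0 (EU) and bit 1 (China) are meaningful and everything else is discarded before the enable command is built (the BIOS value is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t mode = 0x0000000b;	/* hypothetical value from the ACPI PPAG table */

    	mode &= 0x03;			/* keep only the EU/China enable bits */
    	printf("EU=%u China=%u flags=0x%02x\n",
    	       mode & 0x1, (mode >> 1) & 0x1, mode);
    	return 0;
    }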
@@ -2600,6 +2663,474 @@ static void btintel_set_dsm_reset_method(struct hci_dev *hdev,
data->acpi_reset_method = btintel_acpi_reset_method;
}
+#define BTINTEL_ISODATA_HANDLE_BASE 0x900
+
+static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /*
+	 * Distinguish ISO data packets from ACL data packets
+ * based on their connection handle value range.
+ */
+ if (iso_capable(hdev) && hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
+ return HCI_ISODATA_PKT;
+ }
+
+ return hci_skb_pkt_type(skb);
+}
+
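A worked boundary check for the classifier above, assuming the standard HCI encoding where the low 12 bits of the ACL header carry the connection handle and the upper bits carry packet-boundary/broadcast flags (which hci_handle() masks off):

    #include <stdio.h>
    #include <stdint.h>

    #define EXAMPLE_HANDLE_MASK	0x0fff	/* mirrors hci_handle() */
    #define EXAMPLE_ISO_BASE	0x900	/* BTINTEL_ISODATA_HANDLE_BASE */

    static const char *classify(uint16_t wire_handle)
    {
    	return (wire_handle & EXAMPLE_HANDLE_MASK) >= EXAMPLE_ISO_BASE ?
    	       "ISO" : "ACL";
    }

    int main(void)
    {
    	printf("0x%04x -> %s\n", 0x2001, classify(0x2001));	/* handle 0x001: ACL */
    	printf("0x%04x -> %s\n", 0x2905, classify(0x2905));	/* handle 0x905: ISO */
    	return 0;
    }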
+/*
+ * UefiCnvCommonDSBR UEFI variable provides information from the OEM platforms
+ * if they have replaced the BRI (Bluetooth Radio Interface) resistor to
+ * overcome the potential STEP errors on their designs. Based on the
+ * configuration, the Bluetooth firmware shall adjust the BRI response line drive
+ * strength. The below structure represents DSBR data.
+ * struct {
+ * u8 header;
+ * u32 dsbr;
+ * } __packed;
+ *
+ * header - defines revision number of the structure
+ * dsbr - defines drive strength BRI response
+ * bit0
+ * 0 - instructs bluetooth firmware to use default values
+ * 1 - instructs bluetooth firmware to override default values
+ * bit3:1
+ * Reserved
+ * bit7:4
+ * DSBR override value (only if bit0 is set; default value is 0xF)
+ * bit31:7
+ * Reserved
+ * Expected values for dsbr field:
+ * 1. 0xF1 - indicates that the resistor on board is 33 Ohm
+ * 2. 0x00 or 0xB1 - indicates that the resistor on board is 10 Ohm
+ * 3. Non-existing or invalid (none of the above) UEFI variable - indicates
+ * that the resistor on board is 10 Ohm
+ * Even if the UEFI variable is not present, the driver shall send the
+ * 0xfc0a command so that the firmware uses default values.
+ *
+ */
+static int btintel_uefi_get_dsbr(u32 *dsbr_var)
+{
+ struct btintel_dsbr {
+ u8 header;
+ u32 dsbr;
+ } __packed data;
+
+ efi_status_t status;
+ unsigned long data_size = sizeof(data);
+ efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
+ 0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
+
+ if (!IS_ENABLED(CONFIG_EFI))
+ return -EOPNOTSUPP;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return -EOPNOTSUPP;
+
+ status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+ &data);
+
+ if (status != EFI_SUCCESS || data_size != sizeof(data))
+ return -ENXIO;
+
+ *dsbr_var = data.dsbr;
+ return 0;
+}
+
+static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ struct btintel_dsbr_cmd {
+ u8 enable;
+ u8 dsbr;
+ } __packed;
+
+ struct btintel_dsbr_cmd cmd;
+ struct sk_buff *skb;
+ u32 dsbr, cnvi;
+ u8 status;
+ int err;
+
+ cnvi = ver->cnvi_top & 0xfff;
+	/* The DSBR command needs to be sent for:
+ * 1. BlazarI or BlazarIW + B0 step product in IML image.
+ * 2. Gale Peak2 or BlazarU in OP image.
+ * 3. Scorpious Peak in IML image.
+ */
+
+ switch (cnvi) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ if (ver->img_type == BTINTEL_IMG_IML &&
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01)
+ break;
+ return 0;
+ case BTINTEL_CNVI_GAP:
+ case BTINTEL_CNVI_BLAZARU:
+ if (ver->img_type == BTINTEL_IMG_OP &&
+ hdev->bus == HCI_USB)
+ break;
+ return 0;
+ case BTINTEL_CNVI_SCP:
+ if (ver->img_type == BTINTEL_IMG_IML)
+ break;
+ return 0;
+ default:
+ return 0;
+ }
+
+ dsbr = 0;
+ err = btintel_uefi_get_dsbr(&dsbr);
+ if (err < 0)
+ bt_dev_dbg(hdev, "Error reading efi: %ls (%d)",
+ BTINTEL_EFI_DSBR, err);
+
+ cmd.enable = dsbr & BIT(0);
+ cmd.dsbr = dsbr >> 4 & 0xF;
+
+ bt_dev_info(hdev, "dsbr: enable: 0x%2.2x value: 0x%2.2x", cmd.enable,
+ cmd.dsbr);
+
+ skb = __hci_cmd_sync(hdev, 0xfc0a, sizeof(cmd), &cmd, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return -bt_to_errno(PTR_ERR(skb));
+
+ status = skb->data[0];
+ kfree_skb(skb);
+
+ if (status)
+ return -bt_to_errno(status);
+
+ return 0;
+}
+
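A worked decode of the 0xF1 example value from the comment block above, matching the field extraction in btintel_set_dsbr() (bit 0 is the override enable, bits 7:4 the drive-strength value):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t dsbr = 0xF1;	/* 33 Ohm board, per the table above */
    	uint8_t enable = dsbr & 0x1;		/* -> 1: override defaults */
    	uint8_t value  = (dsbr >> 4) & 0xF;	/* -> 0xF */

    	printf("enable=0x%2.2x value=0x%2.2x\n", enable, value);
    	return 0;
    }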
+#ifdef CONFIG_ACPI
+static acpi_status btintel_evaluate_acpi_method(struct hci_dev *hdev,
+ acpi_string method,
+ union acpi_object **ptr,
+ u8 pkg_size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p;
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_dbg(hdev, "ACPI-BT: No ACPI support for Bluetooth device");
+ return AE_NOT_EXIST;
+ }
+
+ status = acpi_evaluate_object(handle, method, NULL, &buffer);
+
+ if (ACPI_FAILURE(status)) {
+ bt_dev_dbg(hdev, "ACPI-BT: ACPI Failure: %s method: %s",
+ acpi_format_exception(status), method);
+ return status;
+ }
+
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count < pkg_size) {
+ bt_dev_warn(hdev, "ACPI-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ kfree(buffer.pointer);
+ return AE_ERROR;
+ }
+
+ *ptr = buffer.pointer;
+ return 0;
+}
+
+static union acpi_object *btintel_acpi_get_bt_pkg(union acpi_object *buffer)
+{
+ union acpi_object *domain, *bt_pkg;
+ int i;
+
+ for (i = 1; i < buffer->package.count; i++) {
+ bt_pkg = &buffer->package.elements[i];
+ domain = &bt_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == BTINTEL_BT_DOMAIN)
+ return bt_pkg;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static int btintel_send_sar_ddc(struct hci_dev *hdev, struct btintel_cp_ddc_write *data, u8 len)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, len, data, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send sar ddc id:0x%4.4x (%ld)",
+ le16_to_cpu(data->id), PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_send_edr(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 5;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ cmd->data[1] = sar->edr2 >> 3;
+ cmd->data[2] = sar->edr3 >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 6);
+}
+
+static int btintel_send_le(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = min3(sar->le, sar->le_lr, sar->le_2mhz) >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
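A byte-level view of the DDC write these helpers build: one length byte covering the id plus payload, the little-endian id, then the payload. The SAR value and the meaning of the >> 3 scaling are hypothetical here (the units are not spelled out in this patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint8_t br = 80;	/* hypothetical BRDS value for BR */
    	uint8_t pkt[4];

    	pkt[0] = 3;		/* len: 2 id bytes + 1 payload byte */
    	pkt[1] = 0x31;		/* id 0x0131, little-endian low byte */
    	pkt[2] = 0x01;		/* id 0x0131, high byte */
    	pkt[3] = br >> 3;	/* scaled as in btintel_send_br() */

    	printf("%02x %02x %02x %02x\n", pkt[0], pkt[1], pkt[2], pkt[3]);
    	return 0;	/* prints: 03 31 01 0a */
    }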
+static int btintel_send_br_mutual(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr2(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr2;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr3(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_set_legacy_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ u8 buffer[64];
+ int ret;
+
+ cmd = (void *)buffer;
+ ret = btintel_send_br(hdev, cmd, 0x0131, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_br(hdev, cmd, 0x0132, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x0133, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0137, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0138, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013b, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013c, sar);
+
+ return ret;
+}
+
+static int btintel_set_mutual_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ struct sk_buff *skb;
+ u8 buffer[64];
+ bool enable;
+ int ret;
+
+ cmd = (void *)buffer;
+
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019e);
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED)
+ cmd->data[0] = 0x01;
+ else
+ cmd->data[0] = 0x00;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED) {
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019f);
+ cmd->data[0] = sar->sar_2400_chain_a;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+ }
+
+ ret = btintel_send_br_mutual(hdev, cmd, 0x01a0, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr2(hdev, cmd, 0x01a1, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr3(hdev, cmd, 0x01a2, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x01a3, sar);
+ if (ret)
+ return ret;
+
+ enable = true;
+ skb = __hci_cmd_sync(hdev, 0xfe25, 1, &enable, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send Intel SAR Enable (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_sar_send_to_device(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar,
+ struct intel_version_tlv *ver)
+{
+ u16 cnvi, cnvr;
+ int ret;
+
+ cnvi = ver->cnvi_top & 0xfff;
+ cnvr = ver->cnvr_top & 0xfff;
+
+ if (cnvi < BTINTEL_CNVI_BLAZARI && cnvr < BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying legacy Bluetooth SAR");
+ ret = btintel_set_legacy_sar(hdev, sar);
+ } else if (cnvi == BTINTEL_CNVI_GAP || cnvr == BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying mutual Bluetooth SAR");
+ ret = btintel_set_mutual_sar(hdev, sar);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int btintel_acpi_set_sar(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ union acpi_object *bt_pkg, *buffer = NULL;
+ struct btintel_sar_inc_pwr sar;
+ acpi_status status;
+ u8 revision;
+ int ret;
+
+ status = btintel_evaluate_acpi_method(hdev, "BRDS", &buffer, 2);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ bt_pkg = btintel_acpi_get_bt_pkg(buffer);
+
+ if (IS_ERR(bt_pkg)) {
+ ret = PTR_ERR(bt_pkg);
+ goto error;
+ }
+
+ if (!bt_pkg->package.count) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ revision = buffer->package.elements[0].integer.value;
+
+ if (revision > BTINTEL_SAR_INC_PWR) {
+ bt_dev_dbg(hdev, "BT_SAR: revision: 0x%2.2x not supported", revision);
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ memset(&sar, 0, sizeof(sar));
+
+ if (revision == BTINTEL_SAR_LEGACY && bt_pkg->package.count == 8) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.br = bt_pkg->package.elements[2].integer.value;
+ sar.edr2 = bt_pkg->package.elements[3].integer.value;
+ sar.edr3 = bt_pkg->package.elements[4].integer.value;
+ sar.le = bt_pkg->package.elements[5].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[6].integer.value;
+ sar.le_lr = bt_pkg->package.elements[7].integer.value;
+
+ } else if (revision == BTINTEL_SAR_INC_PWR && bt_pkg->package.count == 10) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.inc_power_mode = bt_pkg->package.elements[2].integer.value;
+ sar.sar_2400_chain_a = bt_pkg->package.elements[3].integer.value;
+ sar.br = bt_pkg->package.elements[4].integer.value;
+ sar.edr2 = bt_pkg->package.elements[5].integer.value;
+ sar.edr3 = bt_pkg->package.elements[6].integer.value;
+ sar.le = bt_pkg->package.elements[7].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[8].integer.value;
+ sar.le_lr = bt_pkg->package.elements[9].integer.value;
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Apply only if it is enabled in BIOS */
+ if (sar.bt_sar_bios != 1) {
+ bt_dev_dbg(hdev, "Bluetooth SAR is not enabled");
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = btintel_sar_send_to_device(hdev, &sar, ver);
+error:
+ kfree(buffer);
+ return ret;
+}
+#endif /* CONFIG_ACPI */
+
+static int btintel_set_specific_absorption_rate(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+#ifdef CONFIG_ACPI
+ return btintel_acpi_set_sar(hdev, ver);
+#endif
+ return 0;
+}
+
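For reference, the ACPI package shape that btintel_acpi_set_sar() walks, reconstructed from the element indices above (a sketch, not a dump of a real BRDS table):

    /*
     * BRDS (outer package):
     *   [0] revision              [1..n] per-domain packages
     *
     * Bluetooth domain package, revision 1 (10 elements):
     *   [0] domain (0x12)         [1] bt_sar_bios    [2] inc_power_mode
     *   [3] sar_2400_chain_a      [4] br             [5] edr2
     *   [6] edr3                  [7] le             [8] le_2mhz
     *   [9] le_lr
     *
     * Revision 0 (legacy) packages carry 8 elements: the same fields
     * minus inc_power_mode and sar_2400_chain_a.
     */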
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2616,6 +3147,13 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
boot_param = 0x00000000;
+ /* In case of PCIe, this function might get called multiple times with
+	 * the same hdev instance if there is any error during firmware download.
+	 * Clear stale bits from the previous firmware download attempt.
+ */
+ for (int i = 0; i < __INTEL_NUM_FLAGS; i++)
+ btintel_clear_flag(hdev, i);
+
btintel_set_flag(hdev, INTEL_BOOTLOADER);
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@@ -2634,8 +3172,15 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
if (err)
return err;
+ /* set drive strength of BRI response */
+ err = btintel_set_dsbr(hdev, ver);
+ if (err) {
+ bt_dev_err(hdev, "Failed to send dsbr command (%d)", err);
+ return err;
+ }
+
/* If image type returned is BTINTEL_IMG_IML, then controller supports
- * intermediae loader image
+ * intermediate loader image
*/
if (ver->img_type == BTINTEL_IMG_IML) {
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@@ -2663,6 +3208,9 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Send sar values to controller */
+ btintel_set_specific_absorption_rate(hdev, ver);
+
/* Set PPAG feature */
btintel_set_ppag(hdev, ver);
@@ -2695,7 +3243,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
- /* All Intel new genration controllers support the Microsoft vendor
+ /* All Intel new generation controllers support the Microsoft vendor
	 * extension and use 0xFC1E for VsMsftOpCode.
*/
case 0x17:
@@ -2703,7 +3251,10 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x19:
case 0x1b:
case 0x1c:
+ case 0x1d:
case 0x1e:
+ case 0x1f:
+ case 0x22:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -2713,7 +3264,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
}
EXPORT_SYMBOL_GPL(btintel_set_msft_opcode);
-static void btintel_print_fseq_info(struct hci_dev *hdev)
+void btintel_print_fseq_info(struct hci_dev *hdev)
{
struct sk_buff *skb;
u8 *p;
@@ -2825,6 +3376,7 @@ static void btintel_print_fseq_info(struct hci_dev *hdev)
kfree_skb(skb);
}
+EXPORT_SYMBOL_GPL(btintel_print_fseq_info);
static int btintel_setup_combined(struct hci_dev *hdev)
{
@@ -2885,9 +3437,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -2925,11 +3477,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
if (!btintel_test_flag(hdev,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
- if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
- set_bit(HCI_QUIRK_VALID_LE_STATES,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
@@ -2938,18 +3487,17 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
fallthrough;
case 0x0c: /* WsP */
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3025,13 +3573,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
-
- /* Set Valid LE States quirk */
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3039,12 +3584,18 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_bootloader_setup(hdev, &ver);
btintel_register_devcoredump_support(hdev);
break;
+ case 0x18: /* GfP2 */
+ case 0x1c: /* GaP */
+ /* Re-classify packet type for controllers with LE audio */
+ hdev->classify_pkt_type = btintel_classify_pkt_type;
+ fallthrough;
case 0x17:
- case 0x18:
case 0x19:
case 0x1b:
- case 0x1c:
+ case 0x1d:
case 0x1e:
+ case 0x1f:
+ case 0x22:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -3052,10 +3603,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
-
- /* Apply LE States quirk from solar onwards */
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
@@ -3063,6 +3611,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
btintel_set_dsm_reset_method(hdev, &ver_tlv);
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+ if (err)
+ goto exit_error;
+
btintel_register_devcoredump_support(hdev);
btintel_print_fseq_info(hdev);
break;
@@ -3149,13 +3700,12 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
case INTEL_TLV_TEST_EXCEPTION:
/* Generate devcoredump from exception */
if (!hci_devcd_init(hdev, skb->len)) {
- hci_devcd_append(hdev, skb);
+ hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
hci_devcd_complete(hdev);
} else {
bt_dev_err(hdev, "Failed to generate devcoredump");
- kfree_skb(skb);
}
- return 0;
+ break;
default:
bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
}
@@ -3182,7 +3732,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* indicating that the bootup completed.
*/
btintel_bootup(hdev, ptr, len);
- break;
+ kfree_skb(skb);
+ return 0;
case 0x06:
/* When the firmware loading completes the
* device sends out a vendor specific event
@@ -3190,7 +3741,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* loading.
*/
btintel_secure_send_result(hdev, ptr, len);
- break;
+ kfree_skb(skb);
+ return 0;
}
}
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index b5fea735e260..431998049e68 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -42,7 +42,8 @@ enum {
INTEL_TLV_SBE_TYPE,
INTEL_TLV_OTP_BDADDR,
INTEL_TLV_UNLOCKED_STATE,
- INTEL_TLV_GIT_SHA1
+ INTEL_TLV_GIT_SHA1,
+ INTEL_TLV_FW_ID = 0x50
};
struct intel_tlv {
@@ -51,12 +52,23 @@ struct intel_tlv {
u8 val[];
} __packed;
+#define BTINTEL_HCI_OP_RESET 0xfc01
+
#define BTINTEL_CNVI_BLAZARI 0x900
+#define BTINTEL_CNVI_BLAZARIW 0x901
+#define BTINTEL_CNVI_GAP 0x910
+#define BTINTEL_CNVI_BLAZARU 0x930
+#define BTINTEL_CNVI_SCP 0xA00
+
+/* CNVR */
+#define BTINTEL_CNVR_FMP2 0x910
#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
#define BTINTEL_IMG_OP 0x03 /* Operational image */
+#define BTINTEL_FWID_MAXLEN 64
+
struct intel_version_tlv {
u32 cnvi_top;
u32 cnvr_top;
@@ -77,6 +89,7 @@ struct intel_version_tlv {
u8 limited_cce;
u8 sbe_type;
u32 git_sha1;
+ u8 fw_id[BTINTEL_FWID_MAXLEN];
bdaddr_t otp_bd_addr;
};
@@ -157,6 +170,26 @@ struct hci_ppag_enable_cmd {
#define INTEL_TLV_DEBUG_EXCEPTION 0x02
#define INTEL_TLV_TEST_EXCEPTION 0xDE
+struct btintel_cp_ddc_write {
+ u8 len;
+ __le16 id;
+ u8 data[];
+} __packed;
+
+/* Bluetooth SAR feature (BRDS), Revision 1 */
+struct btintel_sar_inc_pwr {
+ u8 revision;
+ u32 bt_sar_bios; /* Mode of SAR control to be used, 1:enabled in bios */
+ u32 inc_power_mode; /* Increased power mode */
+ u8 sar_2400_chain_a; /* Sar power restriction LB */
+ u8 br;
+ u8 edr2;
+ u8 edr3;
+ u8 le;
+ u8 le_2mhz;
+ u8 le_lr;
+};
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
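A worked decode of the version-extraction macros above for a hypothetical cnvi_bt of 0x001e0100 (hardware variant 0x1e is BlazarI per the tables in btintel.c):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t cnvi_bt = 0x001e0100;	/* hypothetical value */

    	/* INTEL_HW_PLATFORM() and INTEL_HW_VARIANT() equivalents */
    	printf("hw_platform=0x%02x\n", (cnvi_bt & 0x0000ff00) >> 8);	/* 0x01 */
    	printf("hw_variant=0x%02x\n",  (cnvi_bt & 0x003f0000) >> 16);	/* 0x1e */
    	return 0;
    }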
@@ -174,6 +207,7 @@ enum {
INTEL_ROM_LEGACY,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
INTEL_ACPI_RESET_ACTIVE,
+ INTEL_WAIT_FOR_D0,
__INTEL_NUM_FLAGS,
};
@@ -244,6 +278,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver);
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
+void btintel_print_fseq_info(struct hci_dev *hdev);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -373,4 +408,8 @@ static inline int btintel_shutdown_combined(struct hci_dev *hdev)
static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
}
+
+static inline void btintel_print_fseq_info(struct hci_dev *hdev)
+{
+}
#endif
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 5b6805d87fcf..2936b535479f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -14,10 +14,12 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btintel_pcie.h"
@@ -35,17 +37,166 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ /* BlazarI, Wildcat Lake */
+ { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
+ /* BlazarI, Lunar Lake */
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H484 */
+ { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H404 */
+ { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+struct btintel_pcie_dev_recovery {
+ struct list_head list;
+ u8 count;
+ time64_t last_error;
+ char name[];
+};
+
/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
#define BTINTEL_PCIE_HCI_TYPE_LEN 4
#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
#define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
#define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
+#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
+
+#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
+
+#define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
+#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
+
+#define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
+#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
+
+#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
+#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
+
+#define BTINTEL_PCIE_RESET_WINDOW_SECS 5
+#define BTINTEL_PCIE_FLR_MAX_RETRY 1
+
+/* Alive interrupt context */
+enum {
+ BTINTEL_PCIE_ROM,
+ BTINTEL_PCIE_FW_DL,
+ BTINTEL_PCIE_HCI_RESET,
+ BTINTEL_PCIE_INTEL_HCI_RESET1,
+ BTINTEL_PCIE_INTEL_HCI_RESET2,
+ BTINTEL_PCIE_D0,
+ BTINTEL_PCIE_D3
+};
+
+/* Structure for dbgc fragment buffer
+ * @buf_addr_lsb: LSB of the buffer's physical address
+ * @buf_addr_msb: MSB of the buffer's physical address
+ * @buf_size: Total size of the buffer
+ */
+struct btintel_pcie_dbgc_ctxt_buf {
+ u32 buf_addr_lsb;
+ u32 buf_addr_msb;
+ u32 buf_size;
+};
+
+/* Structure for dbgc fragment
+ * @magic_num: 0XA5A5A5A5
+ * @ver: For Driver-FW compatibility
+ * @total_size: Total size of the payload debug info
+ * @num_buf: Num of allocated debug bufs
+ * @bufs: All buffer's addresses and sizes
+ */
+struct btintel_pcie_dbgc_ctxt {
+ u32 magic_num;
+ u32 ver;
+ u32 total_size;
+ u32 num_buf;
+ struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
+};
+
+struct btintel_pcie_removal {
+ struct pci_dev *pdev;
+ struct work_struct work;
+};
+
+static LIST_HEAD(btintel_pcie_recovery_list);
+static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
+
+static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
+{
+ switch (alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ return "rom";
+ case BTINTEL_PCIE_FW_DL:
+ return "fw_dl";
+ case BTINTEL_PCIE_D0:
+ return "d0";
+ case BTINTEL_PCIE_D3:
+ return "d3";
+ case BTINTEL_PCIE_HCI_RESET:
+ return "hci_reset";
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ return "intel_reset1";
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ return "intel_reset2";
+ default:
+ return "unknown";
+ }
+}
+
+/* This function initializes the memory for DBGC buffers and formats the
+ * DBGC fragment which consists header info and DBGC buffer's LSB, MSB and
+ * size as the payload
+ */
+static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
+{
+ struct btintel_pcie_dbgc_ctxt db_frag;
+ struct data_buf *buf;
+ int i;
+
+ data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
+ data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
+ sizeof(*buf), GFP_KERNEL);
+ if (!data->dbgc.bufs)
+ return -ENOMEM;
+
+ data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ data->dbgc.count *
+ BTINTEL_PCIE_DBGC_BUFFER_SIZE,
+ &data->dbgc.buf_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.buf_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ sizeof(struct btintel_pcie_dbgc_ctxt),
+ &data->dbgc.frag_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.frag_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
+
+ db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
+ db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
+ db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
+ db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
+
+ for (i = 0; i < data->dbgc.count; i++) {
+ buf = &data->dbgc.bufs[i];
+ buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ }
+
+ memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
+ return 0;
+}
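A minimal illustration of the address split used when filling the fragment entries, mirroring lower_32_bits()/upper_32_bits() on a 64-bit DMA address (the address itself is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t dma = 0x0000000123456000ULL;	/* hypothetical bus address */
    	uint32_t lsb = (uint32_t)dma;		/* lower_32_bits() */
    	uint32_t msb = (uint32_t)(dma >> 32);	/* upper_32_bits() */

    	printf("buf_addr_lsb=0x%08x buf_addr_msb=0x%08x\n", lsb, msb);
    	return 0;
    }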
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
@@ -63,24 +214,6 @@ static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
}
-static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset,
- u32 bits, u32 mask, int timeout_us)
-{
- int t = 0;
- u32 reg;
-
- do {
- reg = btintel_pcie_rd_reg32(data, offset);
-
- if ((reg & mask) == (bits & mask))
- return t;
- udelay(POLL_INTERVAL_US);
- t += POLL_INTERVAL_US;
- } while (t < timeout_us);
-
- return -ETIMEDOUT;
-}
-
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
{
u8 queue = entry->entry;
@@ -123,11 +256,105 @@ static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
memcpy(buf->data, skb->data, tfd->size);
}
+static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 cr_hia, cr_tia;
+ u32 reg, mbox_reg;
+ struct sk_buff *skb;
+ u8 buf[80];
+
+ skb = alloc_skb(1024, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+	snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ----");
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+ data->boot_stage_cache = reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
+ snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
+ snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
+ snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+	/* Read the mailbox status and registers */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
+ snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_1_REG);
+ snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_2_REG);
+ snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_3_REG);
+ snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_4_REG);
+ snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+ snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
+ snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ snprintf(buf, sizeof(buf), "--------------------------------");
+ bt_dev_dbg(hdev, "%s", buf);
+
+ hci_recv_diag(hdev, skb);
+}
+
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 pkt_type, u16 opcode)
{
int ret;
u16 tfd_index;
+ u32 old_ctxt;
+ bool wait_on_alive = false;
+ struct hci_dev *hdev = data->hdev;
+
struct txq *txq = &data->txq;
tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
@@ -135,6 +362,26 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
if (tfd_index > txq->count)
return -ERANGE;
+ /* Firmware raises alive interrupt on HCI_OP_RESET or
+ * BTINTEL_HCI_OP_RESET
+ */
+ wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
+ (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
+
+ if (wait_on_alive) {
+ data->gp0_received = false;
+ old_ctxt = data->alive_intr_ctxt;
+ data->alive_intr_ctxt =
+ (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
+ BTINTEL_PCIE_HCI_RESET);
+ bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
+ opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ }
+
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
/* Prepare for TX. It updates the TFD with the length of data and
* address of the DMA buffer, and copy the data to the DMA buffer
*/
@@ -152,9 +399,25 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
/* Wait for the complete interrupt - URBD0 */
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
- if (!ret)
+ if (!ret) {
+ bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
+ BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
+ btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
+ }
+ if (wait_on_alive) {
+ ret = wait_event_timeout(data->gp0_wait_q,
+ data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (!ret) {
+ hdev->stat.err_tx++;
+ bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ return -ETIME;
+ }
+ }
return 0;
}
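The send path above prepends a 4-byte packet-type word (this transport's replacement for the single BT SIG H:4 type byte) with a host-order memcpy. On a little-endian host that yields the following wire bytes for an HCI command; byte order on big-endian hosts would differ, and whether the firmware expects little-endian is an assumption here:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
    	uint32_t pkt_type = 0x00000001;	/* BTINTEL_PCIE_HCI_CMD_PKT */
    	uint8_t hdr[4];

    	memcpy(hdr, &pkt_type, sizeof(hdr));	/* mirrors the skb_push()+memcpy */
    	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
    	return 0;	/* 01 00 00 00 on little-endian */
    }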
@@ -218,8 +481,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
int i, ret;
+ struct rxq *rxq = &data->rxq;
- for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to overcome the
+	 * hardware issues leading to a race condition in the firmware.
+ */
+
+ for (i = 0; i < rxq->count - 3; i++) {
ret = btintel_pcie_submit_rx(data);
if (ret)
return ret;
@@ -236,10 +504,249 @@ static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
}
-static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
+static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
+{
+ u32 reg;
+ int retry = 3;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
+ break;
+ usleep_range(10000, 12000);
+
+ } while (--retry > 0);
+ usleep_range(10000, 12000);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+ usleep_range(10000, 12000);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+
+	/* If the shared hardware reset succeeds, the boot stage register is
+	 * cleared to 0
+	 */
+ return reg == 0 ? 0 : -ENODEV;
+}
+
+static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ /* Set MAC_INIT bit to start primary bootloader */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+ int retry = 15;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+ if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
+ return 0;
+		/* Need a delay here for the Target Access hardware to settle down */
+ usleep_range(1000, 1200);
+
+ } while (--retry > 0);
+
+ return -ETIME;
+}
+
+static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
+{
+ struct intel_tlv *tlv;
+
+ tlv = dest;
+ tlv->type = type;
+ tlv->len = size;
+ memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
+}
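
btintel_pcie_copy_tlv() returns the address just past the record it wrote, so successive calls chain naturally into one packed buffer. A minimal sketch, assuming the caller has reserved enough space (the buffer size and value here are made up for illustration):

    u8 buf[64];
    void *p = buf;
    u32 cnvi = 0x00400410;        /* placeholder value */

    /* Each call writes type, length and value, then advances past them */
    p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &cnvi, sizeof(cnvi));
    /* p is now positioned for the next TLV */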
+
+static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
+{
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
+ struct btintel_pcie_dbgc *dbgc = &data->dbgc;
+ struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
+ struct intel_tlv *tlv;
+ struct timespec64 now;
+ struct tm tm_now;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
+ wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
+ offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
+
+ buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
+ if (buf_idx > dbgc->count) {
+ bt_dev_warn(hdev, "Buffer index is invalid");
+ return -EINVAL;
+ }
+
+ prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
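+	/* Guard against u32 overflow when adding the hardware write offset to
+	 * the buffer base
+	 */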
+ if (prev_size + offset >= prev_size)
+ data->dmp_hdr.write_ptr = prev_size + offset;
+ else
+ return -EINVAL;
+
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
+ ktime_get_real_ts64(&now);
+ time64_to_tm(now.tv_sec, 0, &tm_now);
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
+ tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
+
+ snprintf(fw_build, sizeof(fw_build),
+ "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
+ 2000 + (data->dmp_hdr.fw_timestamp >> 8),
+ data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
+ data->dmp_hdr.fw_build_num);
+
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
+
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - to store tlv data size
+ * data_len - TLV data
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
+
+ /* Add debug buffers data length to dump size */
+ dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
+
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
+
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
+
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+	/* Read the wrap counter before capturing it in the TLV area */
+	data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
+							 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
+
+	p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+				  sizeof(data->dmp_hdr.wrap_ctr));
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
+}
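
The blob handed to dev_coredumpv() is framed as a 4-byte magic, a 4-byte TLV-area length, the TLV metadata, and finally the raw debug buffers (dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE bytes; with 16 buffers of 256 KB each that is 4 MB). A consumer-side validation sketch, assuming the same framing (this is not a kernel interface, just an illustration):

    static bool example_check_dump(const u8 *blob, size_t size)
    {
            u32 magic, tlv_len;

            if (size < 2 * sizeof(u32))
                    return false;
            memcpy(&magic, blob, sizeof(magic));
            memcpy(&tlv_len, blob + sizeof(magic), sizeof(tlv_len));
            /* The TLV area must fit before the raw debug buffers */
            return magic == BTINTEL_PCIE_MAGIC_NUM &&
                   tlv_len <= size - 2 * sizeof(u32);
    }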
+
+static void btintel_pcie_dump_traces(struct hci_dev *hdev)
{
- btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ int ret = 0;
+
+ ret = btintel_pcie_get_mac_access(data);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
+ return;
+ }
+
+ ret = btintel_pcie_read_dram_buffers(data);
+
+ btintel_pcie_release_mac_access(data);
+
+ if (ret)
+ bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
@@ -251,6 +758,7 @@ static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{
int err;
+ u32 reg;
data->gp0_received = false;
@@ -266,22 +774,17 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
data->boot_stage_cache = 0x0;
/* Set MAC_INIT bit to start primary bootloader */
- btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
- btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
-
- /* Wait until MAC_ACCESS is granted */
- err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
- BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
- if (err < 0)
- return -ENODEV;
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
/* MAC is ready. Enable BT FUNC */
btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
@@ -289,8 +792,9 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
/* wait for interrupt from the device after booting up to primary
* bootloader.
*/
+ data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
- msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
if (!err)
return -ETIME;
@@ -301,12 +805,87 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
return 0;
}
+static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
+}
+
+static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
+ !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
+}
+
+static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
+}
+
+static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
+{
+ return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
+}
+
+static inline bool btintel_pcie_in_device_halt(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED;
+}
+
+static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
+ u32 dxstate)
+{
+ bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
+}
+
+static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
+ void *buf, u32 dev_addr, int len)
+{
+ int err;
+ u32 *val = buf;
+
+ /* Get device mac access */
+ err = btintel_pcie_get_mac_access(data);
+ if (err) {
+ bt_dev_err(data->hdev, "Failed to get mac access %d", err);
+ return err;
+ }
+
+ for (; len > 0; len -= 4, dev_addr += 4, val++)
+ *val = btintel_pcie_rd_dev_mem(data, dev_addr);
+
+ btintel_pcie_release_mac_access(data);
+
+ return 0;
+}
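
Note the read loop assumes len is a multiple of 4 and that buf is word-aligned, since the PRPH window moves one 32-bit word per access. A usage sketch (the address below is a placeholder, not a documented offset):

    u32 words[4];
    int err;

    /* Reads 16 bytes of device memory under the MAC-access bracket */
    err = btintel_pcie_read_device_mem(data, words, 0xf3800000, sizeof(words));
    if (err)
            bt_dev_err(data->hdev, "device mem read failed: %d", err);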
+
+static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
+ (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
+}
+
+static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
+ (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
+}
+
+static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
+ btintel_pcie_dump_debug_registers(data->hdev);
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{
- u32 reg;
+ bool submit_rx, signal_waitq;
+ u32 reg, old_ctxt;
/* This interrupt is for three different causes and it is not easy to
* know what causes the interrupt. So, it compares each register value
@@ -316,20 +895,101 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->boot_stage_cache)
data->boot_stage_cache = reg;
+ bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
+ data->boot_stage_cache, reg);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
+ if (btintel_pcie_in_error(data)) {
+ bt_dev_err(data->hdev, "Controller in error state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
+ if (btintel_pcie_in_lockdown(data)) {
+ bt_dev_err(data->hdev, "Controller in lockdown state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
data->gp0_received = true;
- /* If the boot stage is OP or IML, reset IA and start RX again */
- if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
- data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
+ old_ctxt = data->alive_intr_ctxt;
+ submit_rx = false;
+ signal_waitq = false;
+
+ switch (data->alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
+ signal_waitq = true;
+ break;
+ case BTINTEL_PCIE_FW_DL:
+		/* The error case is already handled above; control should
+		 * not reach here
+		 */
+ break;
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ if (btintel_pcie_in_op(data)) {
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ }
+
+ if (btintel_pcie_in_iml(data)) {
+ submit_rx = true;
+ signal_waitq = true;
+ data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
+ btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ }
+ break;
+ case BTINTEL_PCIE_D0:
+ if (btintel_pcie_in_d3(data)) {
+ data->alive_intr_ctxt = BTINTEL_PCIE_D3;
+ signal_waitq = true;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_D3:
+ if (btintel_pcie_in_d0(data)) {
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_HCI_RESET:
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
+ data->alive_intr_ctxt);
+ break;
+ }
+
+ if (submit_rx) {
btintel_pcie_reset_ia(data);
btintel_pcie_start_rx(data);
}
- wake_up(&data->gp0_wait_q);
+ if (signal_waitq) {
+ bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
+ wake_up(&data->gp0_wait_q);
+ }
+
+ if (old_ctxt != data->alive_intr_ctxt)
+ bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
+ btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
}
/* This function handles the MSX-X interrupt for rx queue 0 which is for TX
@@ -363,6 +1023,75 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
}
}
+static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+ hdr->plen > 0) {
+ const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+ unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+
+ if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+ switch (skb->data[2]) {
+ case 0x02:
+ /* When switching to the operational firmware
+ * the device sends a vendor specific event
+ * indicating that the bootup completed.
+ */
+ btintel_bootup(hdev, ptr, len);
+
+				/* If the bootup event comes from the operational
+				 * image, the driver needs to write the sleep
+				 * control register to move into the D0 state
+				 */
+ if (btintel_pcie_in_op(data)) {
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (btintel_pcie_in_iml(data)) {
+					/* For IML there is no D0 transition.
+					 * Mimic one by clearing the
+					 * INTEL_WAIT_FOR_D0 bit and waking up
+					 * the task waiting on it. This is
+					 * required because intel_boot() is a
+					 * common function for both IML and OP
+					 * image loading.
+					 */
+ if (btintel_test_and_clear_flag(data->hdev,
+ INTEL_WAIT_FOR_D0))
+ btintel_wake_up_flag(data->hdev,
+ INTEL_WAIT_FOR_D0);
+ }
+ kfree_skb(skb);
+ return 0;
+ case 0x06:
+ /* When the firmware loading completes the
+ * device sends out a vendor specific event
+ * indicating the result of the firmware
+ * loading.
+ */
+ btintel_secure_send_result(hdev, ptr, len);
+ kfree_skb(skb);
+ return 0;
+ }
+ }
+
+	/* This is a debug event that comes from IML and OP image when it
+	 * starts execution. There is no need to pass this event to the
+	 * stack.
+	 */
+ if (skb->data[2] == 0x97) {
+ hci_recv_diag(hdev, skb);
+ return 0;
+ }
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
/* Process the received rx data
 * It checks the frame header to identify the data type, creates the skb
 * and calls the HCI API
@@ -374,7 +1103,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
- struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -382,7 +1110,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
/* The first 4 bytes indicates the Intel PCIe specific packet type */
pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
- if (!data) {
+ if (!pdata) {
bt_dev_err(hdev, "Corrupted packet received");
ret = -EILSEQ;
goto exit_error;
@@ -423,6 +1151,18 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
goto exit_error;
}
break;
+
+ case BTINTEL_PCIE_HCI_ISO_PKT:
+ if (skb->len >= HCI_ISO_HDR_SIZE) {
+ plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
+ pkt_type = HCI_ISODATA_PKT;
+ } else {
+ bt_dev_err(hdev, "ISO packet is too short");
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+ break;
+
default:
bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
pcie_pkt_type);
@@ -439,24 +1179,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
- new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
- if (!new_skb) {
- bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
- skb->len);
- ret = -ENOMEM;
- goto exit_error;
- }
-
- hci_skb_pkt_type(new_skb) = pkt_type;
- skb_put_data(new_skb, skb->data, plen);
+ hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
+ skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
- ret = btintel_recv_event(hdev, new_skb);
+ ret = btintel_pcie_recv_event(hdev, skb);
else
- ret = hci_recv_frame(hdev, new_skb);
+ ret = hci_recv_frame(hdev, skb);
+ skb = NULL; /* skb is freed in the callee */
exit_error:
+ if (skb)
+ kfree_skb(skb);
+
if (ret)
hdev->stat.err_rx++;
@@ -465,21 +1201,152 @@ exit_error:
return ret;
}
+static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
+{
+ int len, err, offset, pending;
+ struct sk_buff *skb;
+ u8 *buf, prefix[64];
+ u32 addr, val;
+ u16 pkt_len;
+
+ struct tlv {
+ u8 type;
+ __le16 len;
+ u8 val[];
+ } __packed;
+
+ struct tlv *tlv;
+
+ switch (data->dmp_hdr.cnvi_top & 0xfff) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ /* only from step B0 onwards */
+ if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
+ return;
+ len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
+ addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
+ break;
+ case BTINTEL_CNVI_SCP:
+ len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
+ addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
+ return;
+ }
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto exit_on_error;
+
+ btintel_pcie_mac_init(data);
+
+ err = btintel_pcie_read_device_mem(data, buf, addr, len);
+ if (err)
+ goto exit_on_error;
+
+ val = get_unaligned_le32(buf);
+ if (val != BTINTEL_PCIE_MAGIC_NUM) {
+ bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
+ val);
+ goto exit_on_error;
+ }
+
+ snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
+
+ offset = 4;
+ do {
+ pending = len - offset;
+ if (pending < sizeof(*tlv))
+ break;
+ tlv = (struct tlv *)(buf + offset);
+
+ /* If type == 0, then there are no more TLVs to be parsed */
+ if (!tlv->type) {
+ bt_dev_dbg(data->hdev, "Invalid TLV type 0");
+ break;
+ }
+ pkt_len = le16_to_cpu(tlv->len);
+ offset += sizeof(*tlv);
+ pending = len - offset;
+ if (pkt_len > pending)
+ break;
+
+ offset += pkt_len;
+
+		/* Only TLVs of type == 1 are HCI events; there is no need to
+		 * process other TLVs
+		 */
+ if (tlv->type != 1)
+ continue;
+
+ bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
+ if (pkt_len > HCI_MAX_EVENT_SIZE)
+ break;
+ skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
+ if (!skb)
+ goto exit_on_error;
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, tlv->val, pkt_len);
+
+ /* copy Intel specific pcie packet type */
+ val = BTINTEL_PCIE_HCI_EVT_PKT;
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
+ tlv->val, pkt_len, false);
+
+ btintel_pcie_recv_frame(data, skb);
+ } while (offset < len);
+
+exit_on_error:
+ kfree(buf);
+}
+
+static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received hw exception interrupt");
+
+ if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
+ return;
+
+ /* Trigger device core dump when there is HW exception */
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+
+ queue_work(data->workqueue, &data->rx_work);
+}
+
static void btintel_pcie_rx_work(struct work_struct *work)
{
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
- int err;
- struct hci_dev *hdev = data->hdev;
+
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
+ if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
+		/* Unlike USB products, the controller does not send a
+		 * hardware exception event on an exception. Instead, it
+		 * writes the exception event to device memory along with
+		 * optional debug events, raises an MSI-X interrupt and
+		 * halts. The driver reads the exception event from device
+		 * memory and passes it to the stack for further processing.
+		 */
+ btintel_pcie_read_hwexp(data);
+ clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
+ }
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
- err = btintel_pcie_recv_frame(data, skb);
- if (err)
- bt_dev_err(hdev, "Failed to send received frame: %d",
- err);
- kfree_skb(skb);
+ btintel_pcie_recv_frame(data, skb);
}
}
@@ -503,10 +1370,8 @@ static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status
buf += sizeof(*rfh_hdr);
skb = alloc_skb(len, GFP_ATOMIC);
- if (!skb) {
- ret = -ENOMEM;
+ if (!skb)
goto resubmit;
- }
skb_put_data(skb, buf, len);
skb_queue_tail(&data->rx_skb_q, skb);
@@ -534,10 +1399,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
- if (cr_tia == cr_hia) {
- bt_dev_warn(hdev, "RXQ: no new CD found");
+ if (cr_tia == cr_hia)
return;
- }
rxq = &data->rxq;
@@ -573,6 +1436,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
+static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+}
+
+static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+}
+
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -593,19 +1466,33 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- /* This interrupt is triggered by the firmware after updating
- * boot_stage register and image_response register
- */
- if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
- btintel_pcie_msix_gp0_handler(data);
+ /* This interrupt is raised when there is an hardware exception */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
+ btintel_pcie_msix_hw_exp_handler(data);
+
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
+ btintel_pcie_msix_gp1_handler(data);
+
/* For TX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
+ if (!btintel_pcie_is_rxq_empty(data))
+ btintel_pcie_msix_rx_handle(data);
+ }
/* For RX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
+ if (!btintel_pcie_is_txackq_empty(data))
+ btintel_pcie_msix_tx_handle(data);
+ }
+
+ /* This interrupt is triggered by the firmware after updating
+ * boot_stage register and image_response register
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
/*
* Before sending the interrupt the HW disables it to prevent a nested
@@ -673,7 +1560,8 @@ struct btintel_pcie_causes_list {
static struct btintel_pcie_causes_list causes_list[] = {
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
- { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
};
/* This function configures the interrupt masks for both HW_INT_CAUSES and
@@ -721,13 +1609,9 @@ static int btintel_pcie_config_pcie(struct pci_dev *pdev,
return err;
}
- err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
- if (err)
- return err;
-
- data->base_addr = pcim_iomap_table(pdev)[0];
- if (!data->base_addr)
- return -ENODEV;
+ data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ if (IS_ERR(data->base_addr))
+ return PTR_ERR(data->base_addr);
err = btintel_pcie_setup_irq(data);
if (err)
@@ -764,6 +1648,11 @@ static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
ci->num_urbdq1 = data->rxq.count;
ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+
+ ci->dbg_output_mode = 0x01;
+ ci->dbgc_addr = data->dbgc.frag_p_addr;
+ ci->dbgc_size = data->dbgc.frag_size;
+ ci->dbg_preset = 0x00;
}
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
@@ -797,7 +1686,6 @@ static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
kfree(txq->bufs);
return -ENOMEM;
}
- memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE);
/* Setup the allocated DMA buffer to bufs. Each data_buf should
* have virtual address and physical address
@@ -842,7 +1730,6 @@ static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
kfree(rxq->bufs);
return -ENOMEM;
}
- memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE);
/* Setup the allocated DMA buffer to bufs. Each data_buf should
* have virtual address and physical address
@@ -908,8 +1795,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
* + size of index * Number of queues(2) * type of index array(4)
* + size of context information
*/
- total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
- + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+ total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
+ total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
@@ -934,36 +1821,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
data->dma_v_addr = v_addr;
/* Setup descriptor count */
- data->txq.count = BTINTEL_DESCS_COUNT;
- data->rxq.count = BTINTEL_DESCS_COUNT;
+ data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
+ data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
/* Setup tfds */
data->txq.tfds_p_addr = p_addr;
data->txq.tfds = v_addr;
- p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup urbd0 */
data->txq.urbd0s_p_addr = p_addr;
data->txq.urbd0s = v_addr;
- p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup FRBD*/
data->rxq.frbds_p_addr = p_addr;
data->rxq.frbds = v_addr;
- p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup urbd1 */
data->rxq.urbd1s_p_addr = p_addr;
data->rxq.urbd1s = v_addr;
- p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup data buffers for txq */
err = btintel_pcie_setup_txq_bufs(data, &data->txq);
@@ -978,6 +1865,11 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
/* Setup Index Array */
btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
+ /* Setup data buffers for dbgc */
+ err = btintel_pcie_setup_dbgc(data);
+ if (err)
+ goto exit_error_txq;
+
/* Setup Context Information */
p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
@@ -1042,9 +1934,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ struct hci_command_hdr *cmd;
+ __u16 opcode = ~0;
int ret;
u32 type;
+ if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return -ENODEV;
+
/* Due to the fw limitation, the type header of the packet should be
* 4 bytes unlike 1 byte for UART. In UART, the firmware can read
* the first byte to get the packet type and redirect the rest of data
@@ -1062,18 +1959,21 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
type = BTINTEL_PCIE_HCI_CMD_PKT;
+ cmd = (void *)skb->data;
+ opcode = le16_to_cpu(cmd->opcode);
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
+
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
@@ -1084,19 +1984,21 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
type = BTINTEL_PCIE_HCI_SCO_PKT;
hdev->stat.sco_tx++;
break;
+ case HCI_ISODATA_PKT:
+ type = BTINTEL_PCIE_HCI_ISO_PKT;
+ break;
default:
bt_dev_err(hdev, "Unknown HCI packet type");
return -EILSEQ;
}
- memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
- BTINTEL_PCIE_HCI_TYPE_LEN);
- ret = btintel_pcie_send_sync(data, skb);
+ ret = btintel_pcie_send_sync(data, skb, type, opcode);
if (ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "Failed to send frame (%d)", ret);
goto exit_error;
}
+
hdev->stat.byte_tx += skb->len;
kfree_skb(skb);
@@ -1114,8 +2016,31 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
data->hdev = NULL;
}
-static int btintel_pcie_setup(struct hci_dev *hdev)
+static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
+{
+ for (int i = 0; i < data->alloc_vecs; i++)
+ synchronize_irq(data->msix_entries[i].vector);
+}
+
+static int btintel_pcie_setup_internal(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
const u8 param[1] = { 0xFF };
struct intel_version_tlv ver_tlv;
struct sk_buff *skb;
@@ -1139,9 +2064,9 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -1173,6 +2098,8 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
*/
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
+ case 0x1f: /* ScP */
+ case 0x22: /* BzrIW */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -1180,10 +2107,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
-
- /* Apply LE States quirk from solar onwards */
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
@@ -1197,21 +2121,309 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
err = -EINVAL;
+ goto exit_error;
break;
}
+ data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
+ data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
+ data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
+ data->dmp_hdr.fw_build_type = ver_tlv.build_type;
+ data->dmp_hdr.fw_build_num = ver_tlv.build_num;
+ data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
+
+ if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
+ data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
+
+ btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
return err;
}
+static int btintel_pcie_setup(struct hci_dev *hdev)
+{
+ int err, fw_dl_retry = 0;
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
+ bt_dev_err(hdev, "Firmware download retry count: %d",
+ fw_dl_retry);
+ btintel_pcie_dump_debug_registers(hdev);
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
+ err = btintel_pcie_reset_bt(data);
+ if (err) {
+ bt_dev_err(hdev, "Failed to do shr reset: %d", err);
+ break;
+ }
+ usleep_range(10000, 12000);
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_enable_interrupts(data);
+ btintel_pcie_config_msix(data);
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ bt_dev_err(hdev, "Failed to enable hardware: %d", err);
+ break;
+ }
+ btintel_pcie_start_rx(data);
+ }
+
+ if (!err)
+ set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
+ return err;
+}
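
The condition `fw_dl_retry++ < 1` bounds the loop to at most two setup attempts: the initial try plus one recovery pass (debug-register dump, shared reset, IA reset, interrupt re-enable, RX restart). If any recovery step itself fails, the loop breaks out with that step's error instead of retrying. An illustrative timeline (not driver code):

    /*
     * attempt 0: btintel_pcie_setup_internal() fails -> recover and retry
     * attempt 1: btintel_pcie_setup_internal() fails -> loop exits, err != 0
     * on success at either attempt: BTINTEL_PCIE_SETUP_DONE is set
     */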
+
+static struct btintel_pcie_dev_recovery *
+btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *tmp, *data = NULL;
+ const char *name = pci_name(pdev);
+ const size_t name_len = strlen(name) + 1;
+ struct hci_dev *hdev = to_hci_dev(dev);
+
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
+ if (strcmp(tmp->name, name))
+ continue;
+ data = tmp;
+ break;
+ }
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ if (data) {
+ bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
+ return data;
+ }
+
+ data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
+ if (!data)
+ return NULL;
+
+ strscpy(data->name, name, name_len);
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_add_tail(&data->list, &btintel_pcie_recovery_list);
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ return data;
+}
+
+static void btintel_pcie_free_restart_list(void)
+{
+ struct btintel_pcie_dev_recovery *tmp;
+
+ while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
+ typeof(*tmp), list))) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
+ struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *data;
+ time64_t retry_window;
+
+ data = btintel_pcie_get_recovery(pdev, dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+ if (data->count == 0) {
+ data->last_error = ktime_get_boottime_seconds();
+ data->count++;
+ } else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ data->count++;
+ } else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
+ data->last_error = 0;
+ data->count = 0;
+ }
+}
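
The accounting above opens a window at the first error and counts errors that land inside it; an error arriving after the window has elapsed clears both fields without being counted, so the following failure re-opens a fresh window with the count back at 1. For example (hypothetical timestamps, with w standing for BTINTEL_PCIE_RESET_WINDOW_SECS): an error at t=100 sets last_error=100 and count=1; another at t=100+w-1 bumps count to 2; one at t=100+w+1 resets last_error and count to 0.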
+
+static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);
+
+static void btintel_pcie_removal_work(struct work_struct *wk)
+{
+ struct btintel_pcie_removal *removal =
+ container_of(wk, struct btintel_pcie_removal, work);
+ struct pci_dev *pdev = removal->pdev;
+ struct btintel_pcie_data *data;
+ int err;
+
+ pci_lock_rescan_remove();
+
+ if (!pdev->bus)
+ goto error;
+
+ data = pci_get_drvdata(pdev);
+
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
+ bt_dev_dbg(data->hdev, "Release bluetooth interface");
+ btintel_pcie_release_hdev(data);
+
+ err = pci_reset_function(pdev);
+ if (err) {
+ BT_ERR("Failed resetting the pcie device (%d)", err);
+ goto error;
+ }
+
+ btintel_pcie_enable_interrupts(data);
+ btintel_pcie_config_msix(data);
+
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
+ err);
+ goto error;
+ }
+
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_start_rx(data);
+ data->flags = 0;
+
+ err = btintel_pcie_setup_hdev(data);
+ if (err) {
+ BT_ERR("Failed registering hdev (%d)", err);
+ goto error;
+ }
+error:
+ pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
+ kfree(removal);
+}
+
+static void btintel_pcie_reset(struct hci_dev *hdev)
+{
+ struct btintel_pcie_removal *removal;
+ struct btintel_pcie_data *data;
+
+ data = hci_get_drvdata(hdev);
+
+ if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
+ return;
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal)
+ return;
+
+ removal->pdev = data->pdev;
+ INIT_WORK(&removal->work, btintel_pcie_removal_work);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+}
+
+static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct btintel_pcie_dev_recovery *data;
+ struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
+ struct pci_dev *pdev = dev_data->pdev;
+ time64_t retry_window;
+
+ if (code == 0x13) {
+ bt_dev_err(hdev, "Encountered top exception");
+ return;
+ }
+
+ data = btintel_pcie_get_recovery(pdev, &hdev->dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+
+ if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ bt_dev_err(hdev, "Exhausted maximum: %d recovery attempts: %d",
+ BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
+ bt_dev_dbg(hdev, "Boot time: %lld seconds",
+ ktime_get_boottime_seconds());
+ bt_dev_dbg(hdev, "last error at: %lld seconds",
+ data->last_error);
+ return;
+ }
+ btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
+ btintel_pcie_reset(hdev);
+}
+
+static bool btintel_pcie_wakeup(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(&data->pdev->dev);
+}
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btintel_pcie_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+};
+
+static int btintel_pcie_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btintel_pcie_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, KBUILD_MODNAME);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btintel_pcie_hci_drv_supported_commands[i].opcode;
+ bt_dev_dbg(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btintel_pcie_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS,
+ rp, rp_size);
+
+ kfree(rp);
+ return err;
+}
+
+static const struct hci_drv_handler btintel_pcie_hci_drv_common_handlers[] = {
+ { btintel_pcie_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btintel_pcie_hci_drv_specific_handlers[] = {};
+
+static struct hci_drv btintel_pcie_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_common_handlers),
+ .common_handlers = btintel_pcie_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_specific_handlers),
+ .specific_handlers = btintel_pcie_hci_drv_specific_handlers,
+};
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
struct hci_dev *hdev;
- hdev = hci_alloc_dev();
+ hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
if (!hdev)
return -ENOMEM;
@@ -1227,9 +2439,12 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->send = btintel_pcie_send_frame;
hdev->setup = btintel_pcie_setup;
hdev->shutdown = btintel_shutdown_combined;
- hdev->hw_error = btintel_hw_error;
+ hdev->hw_error = btintel_pcie_hw_error;
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
+ hdev->reset = btintel_pcie_reset;
+ hdev->wakeup = btintel_pcie_wakeup;
+ hdev->hci_drv = &btintel_pcie_hci_drv;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -1237,6 +2452,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
goto exit_error;
}
+ data->dmp_hdr.driver_name = KBUILD_MODNAME;
return 0;
exit_error:
@@ -1326,14 +2542,24 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
data = pci_get_drvdata(pdev);
+ btintel_pcie_disable_interrupts(data);
+
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
btintel_pcie_reset_bt(data);
+ for (int i = 0; i < data->alloc_vecs; i++) {
+ struct msix_entry *msix_entry;
+
+ msix_entry = &data->msix_entries[i];
+ free_irq(msix_entry->vector, msix_entry);
+ }
pci_free_irq_vectors(pdev);
btintel_pcie_release_hdev(data);
- flush_work(&data->rx_work);
-
destroy_workqueue(data->workqueue);
btintel_pcie_free(data);
@@ -1343,13 +2569,197 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+#ifdef CONFIG_DEV_COREDUMP
+static void btintel_pcie_coredump(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data = pci_get_drvdata(pdev);
+
+ if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ return;
+
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
+ queue_work(data->workqueue, &data->rx_work);
+}
+#endif
+
+static int btintel_pcie_set_dxstate(struct btintel_pcie_data *data, u32 dxstate)
+{
+ int retry = 0, status;
+ u32 dx_intr_timeout_ms = 200;
+
+ do {
+ data->gp0_received = false;
+
+ btintel_pcie_wr_sleep_cntrl(data, dxstate);
+
+ status = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(dx_intr_timeout_ms));
+
+ if (status)
+ return 0;
+
+ bt_dev_warn(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D%d entry, retry count %d",
+ dx_intr_timeout_ms, dxstate, retry);
+
+ /* clear gp0 cause */
+ btintel_pcie_clr_reg_bits(data,
+ BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES,
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0);
+
+ /* A hardware bug may cause the alive interrupt to be missed.
+ * Check if the controller reached the expected state and retry
+ * the operation only if it hasn't.
+ */
+ if (dxstate == BTINTEL_PCIE_STATE_D0) {
+ if (btintel_pcie_in_d0(data))
+ return 0;
+ } else {
+ if (btintel_pcie_in_d3(data))
+ return 0;
+ }
+
+ } while (++retry < BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES);
+
+ return -EBUSY;
+}
+
+static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ u32 dxstate;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
+ data->pm_sx_event = mesg.event;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ err = btintel_pcie_set_dxstate(data, dxstate);
+
+ if (err)
+ return err;
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d3 state from d0 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return err;
+}
+
+static int btintel_pcie_suspend(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int btintel_pcie_hibernate(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int btintel_pcie_freeze(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int btintel_pcie_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+	/* When the system enters S4 (hibernate) mode, the Bluetooth device
+	 * loses power, which erases its loaded firmware. Consequently, a
+	 * function level reset (FLR) is required on system resume to bring
+	 * the controller back into an operational state by initiating a new
+	 * firmware download.
+	 */
+
+ if (data->pm_sx_event == PM_EVENT_FREEZE ||
+ data->pm_sx_event == PM_EVENT_HIBERNATE) {
+ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
+ btintel_pcie_reset(data->hdev);
+ return 0;
+ }
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ err = btintel_pcie_set_dxstate(data, BTINTEL_PCIE_STATE_D0);
+
+ if (err == 0) {
+ bt_dev_dbg(data->hdev,
+ "device entered into d0 state from d3 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return err;
+ }
+
+ /* Trigger function level reset if the controller is in error
+ * state during resume() to bring back the controller to
+ * operational mode
+ */
+
+ data->boot_stage_cache = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ if (btintel_pcie_in_error(data) ||
+ btintel_pcie_in_device_halt(data)) {
+ bt_dev_err(data->hdev, "Controller in error state for D0 entry");
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ &data->flags)) {
+ data->dmp_hdr.trigger_reason =
+ BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+ queue_work(data->workqueue, &data->rx_work);
+ }
+ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
+ btintel_pcie_reset(data->hdev);
+ }
+ return err;
+}
+
+static const struct dev_pm_ops btintel_pcie_pm_ops = {
+ .suspend = btintel_pcie_suspend,
+ .resume = btintel_pcie_resume,
+ .freeze = btintel_pcie_freeze,
+ .thaw = btintel_pcie_resume,
+ .poweroff = btintel_pcie_hibernate,
+ .restore = btintel_pcie_resume,
+};
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+ .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
+#ifdef CONFIG_DEV_COREDUMP
+ .driver.coredump = btintel_pcie_coredump
+#endif
};
-module_pci_driver(btintel_pcie_driver);
+
+static int __init btintel_pcie_init(void)
+{
+ return pci_register_driver(&btintel_pcie_driver);
+}
+
+static void __exit btintel_pcie_exit(void)
+{
+ pci_unregister_driver(&btintel_pcie_driver);
+ btintel_pcie_free_restart_list();
+}
+
+module_init(btintel_pcie_init);
+module_exit(btintel_pcie_exit);
MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index baaff70420f5..e3d941ffef4a 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -12,9 +12,19 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG (BTINTEL_PCIE_CSR_BASE + 0x10C)
+#define BTINTEL_PCIE_CSR_IPC_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x110)
+#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_CSR_MBOX_1_REG (BTINTEL_PCIE_CSR_BASE + 0x170)
+#define BTINTEL_PCIE_CSR_MBOX_2_REG (BTINTEL_PCIE_CSR_BASE + 0x174)
+#define BTINTEL_PCIE_CSR_MBOX_3_REG (BTINTEL_PCIE_CSR_BASE + 0x178)
+#define BTINTEL_PCIE_CSR_MBOX_4_REG (BTINTEL_PCIE_CSR_BASE + 0x17C)
+#define BTINTEL_PCIE_CSR_MBOX_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x180)
+#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
+#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
/* BTINTEL_PCIE_CSR Function Control Register */
@@ -22,6 +32,14 @@
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
+
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ (BIT(21))
+/* Stop MAC Access disconnection request */
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS (BIT(22))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ (BIT(23))
+
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
/* Value for BTINTEL_PCIE_CSR_BOOT_STAGE register */
@@ -30,8 +48,12 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
/* Registers for MSI-X */
#define BTINTEL_PCIE_CSR_MSIX_BASE (0x2000)
@@ -44,6 +66,30 @@
#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880)
#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause))
+/* IOSF Debug Register */
+#define BTINTEL_PCIE_DBGC_BASE_ADDR (0xf3800300)
+#define BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x1C)
+#define BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x2C)
+
+#define BTINTEL_PCIE_DBG_IDX_BIT_MASK 0x0F
+#define BTINTEL_PCIE_DBGC_DBG_BUF_IDX(data) (((data) >> 24) & BTINTEL_PCIE_DBG_IDX_BIT_MASK)
+#define BTINTEL_PCIE_DBG_OFFSET_BIT_MASK 0xFFFFFF
+
+/* The DRAM debug buffer count, the size of each buffer and the
+ * fragment buffer size
+ */
+#define BTINTEL_PCIE_DBGC_BUFFER_COUNT 16
+#define BTINTEL_PCIE_DBGC_BUFFER_SIZE (256 * 1024) /* 256 KB */
+
+#define BTINTEL_PCIE_DBGC_FRAG_VERSION 1
+#define BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT BTINTEL_PCIE_DBGC_BUFFER_COUNT
+
+/* Magic number(4), version(4), size of payload length(4) */
+#define BTINTEL_PCIE_DBGC_FRAG_HEADER_SIZE 12
+
+/* Num of alloc Dbg buff (4) + (LSB(4), MSB(4), Size(4)) for each buffer */
+#define BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE 196
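
The payload size can be checked from the comments above: 4 bytes for the allocated-buffer count plus 16 buffers x (4 + 4 + 4) bytes of LSB/MSB/size fields gives 4 + 16 * 12 = 196. Together with the 12-byte fragment header that presumably puts the whole fragment descriptor at 208 bytes.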
+
/* Causes for the FH register interrupts */
enum msix_fh_int_causes {
BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */
@@ -53,6 +99,49 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1 = BIT(1), /* cause 33 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
+};
+
+/* PCIe device states
+ * D0: Host-Device interface is active
+ * D3 hot: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
+ * D3 cold: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
+ */
+enum {
+ BTINTEL_PCIE_STATE_D0 = 0,
+ BTINTEL_PCIE_STATE_D3_HOT = 2,
+ BTINTEL_PCIE_STATE_D3_COLD = 3,
+};
+
+enum {
+ BTINTEL_PCIE_CORE_HALTED,
+ BTINTEL_PCIE_HWEXP_INPROGRESS,
+ BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ BTINTEL_PCIE_RECOVERY_IN_PROGRESS,
+ BTINTEL_PCIE_SETUP_DONE
+};
+
+enum btintel_pcie_tlv_type {
+ BTINTEL_CNVI_BT,
+ BTINTEL_WRITE_PTR,
+ BTINTEL_WRAP_CTR,
+ BTINTEL_TRIGGER_REASON,
+ BTINTEL_FW_SHA,
+ BTINTEL_CNVR_TOP,
+ BTINTEL_CNVI_TOP,
+ BTINTEL_DUMP_TIME,
+ BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
+};
+
+/* causes for the MBOX interrupts */
+enum msix_mbox_int_causes {
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1 = BIT(0), /* cause MBOX1 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2 = BIT(1), /* cause MBOX2 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3 = BIT(2), /* cause MBOX3 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4 = BIT(3), /* cause MBOX4 */
};
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
@@ -67,10 +156,15 @@ enum msix_hw_int_causes {
#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US 200000
/* Default interrupt timeout in msec */
-#define BTINTEL_DEFAULT_INTR_TIMEOUT 3000
+#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
-/* The number of descriptors in TX/RX queues */
-#define BTINTEL_DESCS_COUNT 16
+#define BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES 3
+
+/* The number of descriptors in TX queues */
+#define BTINTEL_PCIE_TX_DESCS_COUNT 32
+
+/* The number of descriptors in RX queues */
+#define BTINTEL_PCIE_RX_DESCS_COUNT 64
/* Number of Queue for TX and RX
* It indicates the index of the IA(Index Array)
@@ -92,9 +186,6 @@ enum {
/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC 0
-/* Number of pending RX requests for downlink */
-#define BTINTEL_PCIE_RX_MAX_QUEUE 6
-
/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC 513
@@ -311,6 +402,37 @@ struct rxq {
struct data_buf *bufs;
};
+/* Structure for DRAM debug buffers
+ * @count: Number of debug buffers
+ * @frag_v_addr: Virtual address of the fragment descriptor
+ * @frag_p_addr: Physical address of the fragment descriptor
+ * @frag_size: Size of the fragment descriptor
+ * @buf_p_addr: Physical address of the data buffers
+ * @buf_v_addr: Virtual address of the data buffers
+ * @bufs: Array of data_buf structures
+ */
+struct btintel_pcie_dbgc {
+ u16 count;
+
+ void *frag_v_addr;
+ dma_addr_t frag_p_addr;
+ u16 frag_size;
+
+ dma_addr_t buf_p_addr;
+ void *buf_v_addr;
+ struct data_buf *bufs;
+};
+
+struct btintel_pcie_dump_header {
+ const char *driver_name;
+ u32 cnvi_top;
+ u32 cnvr_top;
+ u16 fw_timestamp;
+ u8 fw_build_type;
+ u32 fw_build_num;
+ u32 fw_git_sha1;
+ u32 cnvi_bt;
+ u32 write_ptr;
+ u32 wrap_ctr;
+ u16 trigger_reason;
+ int state;
+};
+
/* struct btintel_pcie_data
* @pdev: pci device
* @hdev: hdev device
@@ -343,6 +465,8 @@ struct rxq {
* @ia: Index Array struct
* @txq: TX Queue struct
* @rxq: RX Queue struct
+ * @alive_intr_ctxt: Alive interrupt context
+ * @pm_sx_event: PM event on which system got suspended
*/
struct btintel_pcie_data {
struct pci_dev *pdev;
@@ -389,6 +513,10 @@ struct btintel_pcie_data {
struct ia ia;
struct txq txq;
struct rxq rxq;
+ u32 alive_intr_ctxt;
+ struct btintel_pcie_dbgc dbgc;
+ struct btintel_pcie_dump_header dmp_hdr;
+ u8 pm_sx_event;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
@@ -428,3 +556,11 @@ static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data,
r &= ~bits;
iowrite32(r, data->base_addr + offset);
}
+
+static inline u32 btintel_pcie_rd_dev_mem(struct btintel_pcie_data *data,
+ u32 addr)
+{
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_PRPH_DEV_ADDR_REG, addr);
+ return btintel_pcie_rd_reg32(data, BTINTEL_PCIE_PRPH_DEV_RD_REG);
+}
+
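btintel_pcie_rd_dev_mem() is the usual indirect-access idiom: latch the target address into an address register, then read the value back through a data window. A generic sketch of the pattern (helper name hypothetical; note the address/data pair is not serialized here, so callers needing concurrent access must provide their own locking):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper showing the indirect-read idiom */
static u32 indirect_read(void __iomem *base, u32 addr_off, u32 data_off,
			 u32 addr)
{
	iowrite32(addr, base + addr_off);	/* latch the target address */
	return ioread32(base + data_off);	/* read through the data window */
}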
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 18f34998a120..e26b07a9387d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/mmc/sdio_func.h>
@@ -88,7 +89,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
else
adapter->psmode = 0;
BT_DBG("PS Mode:%s",
- (adapter->psmode) ? "Enable" : "Disable");
+ str_enable_disable(adapter->psmode));
} else {
BT_DBG("PS Mode command failed");
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 85b7f2bb4259..93932a0d8625 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -92,7 +92,7 @@ static int btmrvl_sdio_probe_of(struct device *dev,
} else {
ret = devm_request_irq(dev, cfg->irq_bt,
btmrvl_wake_irq_bt,
- 0, "bt_wake", card);
+ IRQF_NO_AUTOEN, "bt_wake", card);
if (ret) {
dev_err(dev,
"Failed to request irq_bt %d (%d)\n",
@@ -100,8 +100,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
}
/* Configure wakeup (enabled by default) */
- device_init_wakeup(dev, true);
- disable_irq(cfg->irq_bt);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
}
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 812fd2a8f853..a8c520dc09e1 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -4,6 +4,9 @@
*/
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <linux/iopoll.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -19,6 +22,9 @@
#define MTK_SEC_MAP_COMMON_SIZE 12
#define MTK_SEC_MAP_NEED_SEND_SIZE 52
+/* mt79xx ISO data transmission setting: packets are padded to this size */
+#define MTK_ISO_THRESHOLD 264
+
struct btmtk_patch_header {
u8 datetime[16];
u8 platform[4];
@@ -64,7 +70,7 @@ static void btmtk_coredump(struct hci_dev *hdev)
static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
char buf[80];
snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
@@ -85,7 +91,7 @@ static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
static void btmtk_coredump_notify(struct hci_dev *hdev, int state)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
switch (state) {
case HCI_DEVCOREDUMP_IDLE:
@@ -103,6 +109,24 @@ static void btmtk_coredump_notify(struct hci_dev *hdev, int state)
}
}
+void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver,
+ u32 fw_flavor)
+{
+ if (dev_id == 0x7925)
+ snprintf(buf, size,
+ "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, dev_id & 0xffff, (fw_ver & 0xff) + 1);
+ else if (dev_id == 0x7961 && fw_flavor)
+ snprintf(buf, size,
+ "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
+ dev_id & 0xffff, (fw_ver & 0xff) + 1);
+ else
+ snprintf(buf, size,
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_ver & 0xff) + 1);
+}
+EXPORT_SYMBOL_GPL(btmtk_fw_get_filename);
+
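The three naming schemes are easiest to see by expanding the format strings. An illustrative stand-alone expansion (device id and version values hypothetical; the version digit is the low byte of fw_ver plus one):

#include <stdio.h>

int main(void)
{
	unsigned int dev_id = 0x7925, fw_ver = 0x01;

	/* 0x7925 scheme: per-chip subdirectory */
	printf("mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin\n",
	       dev_id & 0xffff, dev_id & 0xffff, (fw_ver & 0xff) + 1);
	/* prints: mediatek/mt7925/BT_RAM_CODE_MT7925_1_2_hdr.bin */
	return 0;
}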
int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
@@ -300,7 +324,7 @@ int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_params.data = NULL;
wmt_params.status = NULL;
- /* Activate funciton the firmware providing to */
+ /* Activate the function the firmware provides */
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
@@ -337,7 +361,7 @@ EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
void btmtk_reset_sync(struct hci_dev *hdev)
{
- struct btmediatek_data *reset_work = hci_get_priv(hdev);
+ struct btmtk_data *reset_work = hci_get_priv(hdev);
int err;
hci_dev_lock(hdev);
@@ -353,7 +377,7 @@ EXPORT_SYMBOL_GPL(btmtk_reset_sync);
int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
u32 fw_version)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
return -EOPNOTSUPP;
@@ -369,8 +393,9 @@ EXPORT_SYMBOL_GPL(btmtk_register_coredump);
int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
int err;
+ bool complete = false;
if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
kfree_skb(skb);
@@ -392,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
fallthrough;
case HCI_DEVCOREDUMP_ACTIVE:
default:
+ /* A MediaTek coredump consists of more than MTK_COREDUMP_NUM packets */
+ if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
+ skb->len > MTK_COREDUMP_END_LEN)
+ if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
+ MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
+ complete = true;
+
err = hci_devcd_append(hdev, skb);
if (err < 0)
break;
data->cd_info.cnt++;
- /* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
- if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
- skb->len > MTK_COREDUMP_END_LEN)
- if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
- MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
- bt_dev_info(hdev, "Mediatek coredump end");
- hci_devcd_complete(hdev);
- }
+ if (complete) {
+ bt_dev_info(hdev, "Mediatek coredump end");
+ hci_devcd_complete(hdev);
+ }
break;
}
@@ -413,6 +441,1051 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
+#if IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK)
+static void btmtk_usb_wmt_recv(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct sk_buff *skb;
+ int err;
+
+ if (urb->status == 0 && urb->actual_length > 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ /* WMT event shouldn't be fragmented and the size should be
+ * less than HCI_WMT_MAX_EVENT_SIZE.
+ */
+ skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+ kfree(urb->setup_packet);
+ return;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
+
+ /* When someone is waiting for the WMT event, the skb is cloned
+ * and the event is processed from the clone.
+ */
+ if (test_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags)) {
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!data->evt_skb) {
+ kfree_skb(skb);
+ kfree(urb->setup_packet);
+ return;
+ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+ if (err < 0) {
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+ kfree(urb->setup_packet);
+ return;
+ }
+
+ if (test_and_clear_bit(BTMTK_TX_WAIT_VND_EVT,
+ &data->flags)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags,
+ BTMTK_TX_WAIT_VND_EVT);
+ }
+ kfree(urb->setup_packet);
+ return;
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failures when usb_kill_urb() runs */
+ return;
+ }
+
+ usb_mark_last_busy(data->udev);
+
+ /* The URB complete handler is still called with urb->actual_length = 0
+ * when the event is not yet available, so keep re-submitting the URB
+ * until the WMT event returns. It is also necessary to wait some time
+ * between two consecutive control URBs so the target device can
+ * generate the event; otherwise the WMT event cannot be returned from
+ * the device successfully.
+ */
+ udelay(500);
+
+ usb_anchor_urb(urb, data->ctrl_anchor);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ kfree(urb->setup_packet);
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btmtk_usb_submit_wmt_recv_urb(struct hci_dev *hdev)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct usb_ctrlrequest *dr;
+ unsigned char *buf;
+ int err, size = 64;
+ unsigned int pipe;
+ struct urb *urb;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
+ if (!dr) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+ dr->bRequest = 1;
+ dr->wIndex = cpu_to_le16(0);
+ dr->wValue = cpu_to_le16(48);
+ dr->wLength = cpu_to_le16(size);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ kfree(dr);
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+
+ usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
+ buf, size, btmtk_usb_wmt_recv, hdev);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_anchor_urb(urb, data->ctrl_anchor);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
+ struct btmtk_hci_wmt_cmd *wc;
+ struct btmtk_wmt_hdr *hdr;
+ int err;
+
+ /* Send the WMT command and wait until the WMT event returns */
+ hlen = sizeof(*hdr) + wmt_params->dlen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
+ hdr->dir = 1;
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
+
+ set_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+
+ /* WMT cmd/event doesn't follow the generic HCI cmd/event handling;
+ * the control pipe needs constant polling until the host receives the
+ * WMT event. Thus, specifically acquire a PM counter on the USB
+ * interface to prevent it from entering auto-suspend while a WMT
+ * cmd/event is in progress.
+ */
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto err_free_wc;
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
+
+ if (err < 0) {
+ clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+ usb_autopm_put_interface(data->intf);
+ goto err_free_wc;
+ }
+
+ /* Submit control IN URB on demand to process the WMT event */
+ err = btmtk_usb_submit_wmt_recv_urb(hdev);
+
+ usb_autopm_put_interface(data->intf);
+
+ if (err < 0)
+ goto err_free_wc;
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for the BTMTK_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
+ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+ err = -ETIMEDOUT;
+ goto err_free_wc;
+ }
+
+ if (data->evt_skb == NULL)
+ goto err_free_wc;
+
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case BTMTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case BTMTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ case BTMTK_WMT_PATCH_DWNLD:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_DONE;
+ else if (wmt_evt->whdr.flag == 1)
+ status = BTMTK_WMT_PATCH_PROGRESS;
+ else
+ status = BTMTK_WMT_PATCH_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+err_free_wc:
+ kfree(wc);
+ return err;
+}
+
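The command/event handshake in btmtk_usb_hci_wmt_sync() is the standard bit-wait pattern: the sender sets a flag bit and sleeps in wait_on_bit_timeout(); the receive path clears the bit and wakes the waiter. A stripped-down sketch of the two halves (flag name hypothetical):

#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define MY_WAIT_FLAG	0

/* Sender: set the flag, send, then sleep until the flag is cleared */
static int send_and_wait(unsigned long *flags, unsigned long timeout)
{
	set_bit(MY_WAIT_FLAG, flags);
	/* ... submit the command here ... */
	return wait_on_bit_timeout(flags, MY_WAIT_FLAG,
				   TASK_UNINTERRUPTIBLE, timeout);
}

/* Receiver: clear the flag and wake the waiter */
static void complete_wait(unsigned long *flags)
{
	if (test_and_clear_bit(MY_WAIT_FLAG, flags)) {
		smp_mb__after_atomic();	/* order the clear before the wakeup */
		wake_up_bit(flags, MY_WAIT_FLAG);
	}
}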
+static int btmtk_usb_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int btmtk_usb_uhw_reg_write(struct hci_dev *hdev, u32 reg, u32 val)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ int pipe, err;
+ void *buf;
+
+ buf = kzalloc(4, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_le32(val, buf);
+
+ pipe = usb_sndctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x02,
+ 0x5E,
+ reg >> 16, reg & 0xffff,
+ buf, 4, USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ bt_dev_err(hdev, "Failed to write uhw reg(%d)", err);
+
+ kfree(buf);
+
+ return err;
+}
+
+static int btmtk_usb_uhw_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ int pipe, err;
+ void *buf;
+
+ buf = kzalloc(4, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x01,
+ 0xDE,
+ reg >> 16, reg & 0xffff,
+ buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to read uhw reg(%d)", err);
+ goto err_free_buf;
+ }
+
+ *val = get_unaligned_le32(buf);
+ bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val);
+
+err_free_buf:
+ kfree(buf);
+
+ return err;
+}
+
+static int btmtk_usb_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ int pipe, err, size = sizeof(u32);
+ void *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x63,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ reg >> 16, reg & 0xffff,
+ buf, size, USB_CTRL_GET_TIMEOUT);
+ if (err < 0)
+ goto err_free_buf;
+
+ *val = get_unaligned_le32(buf);
+
+err_free_buf:
+ kfree(buf);
+
+ return err;
+}
+
+static int btmtk_usb_id_get(struct hci_dev *hdev, u32 reg, u32 *id)
+{
+ return btmtk_usb_reg_read(hdev, reg, id);
+}
+
+static u32 btmtk_usb_reset_done(struct hci_dev *hdev)
+{
+ u32 val = 0;
+
+ btmtk_usb_uhw_reg_read(hdev, MTK_BT_MISC, &val);
+
+ return val & MTK_BT_RST_DONE;
+}
+
+int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id)
+{
+ u32 val;
+ int err;
+
+ if (dev_id == 0x7922) {
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val);
+ if (err < 0)
+ return err;
+ val |= 0x00002020;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val);
+ if (err < 0)
+ return err;
+ val |= BIT(0);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val);
+ if (err < 0)
+ return err;
+ msleep(100);
+ } else if (dev_id == 0x7925) {
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val);
+ if (err < 0)
+ return err;
+ val |= (1 << 5);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val);
+ if (err < 0)
+ return err;
+ val &= 0xFFFF00FF;
+ val |= (1 << 13);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val);
+ if (err < 0)
+ return err;
+ val |= (1 << 0);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val);
+ if (err < 0)
+ return err;
+ msleep(100);
+ } else {
+ /* It's the Device Endpoint Reset Option Register */
+ bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_WDT_STATUS, &val);
+ if (err < 0)
+ return err;
+ /* Reset the bluetooth chip via USB interface. */
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 1);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val);
+ if (err < 0)
+ return err;
+ /* MT7921 needs a 20ms delay between toggling the reset bit */
+ msleep(20);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 0);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val);
+ if (err < 0)
+ return err;
+ }
+
+ err = readx_poll_timeout(btmtk_usb_reset_done, hdev, val,
+ val & MTK_BT_RST_DONE, 20000, 1000000);
+ if (err < 0)
+ bt_dev_err(hdev, "Reset timeout");
+
+ if (dev_id == 0x7922) {
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ if (err < 0)
+ return err;
+ }
+
+ err = btmtk_usb_id_get(hdev, 0x70010200, &val);
+ if (err < 0 || !val)
+ bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_subsys_reset);
+
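readx_poll_timeout(op, arg, val, cond, sleep_us, timeout_us) repeatedly evaluates val = op(arg) until cond is true or timeout_us elapses, sleeping sleep_us between reads, and returns 0 on success or -ETIMEDOUT otherwise; the call above therefore polls btmtk_usb_reset_done() every 20ms for up to 1s. A rough open-coded equivalent, as a sketch:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

/* Approximates the readx_poll_timeout() call in btmtk_usb_subsys_reset() */
static int poll_reset_done(struct hci_dev *hdev)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 1000000);
	u32 val;

	for (;;) {
		val = btmtk_usb_reset_done(hdev);
		if (val & MTK_BT_RST_DONE)
			return 0;
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
		usleep_range(20000, 20100);	/* 20ms poll interval */
	}
}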
+int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ switch (handle) {
+ case 0xfc6f: /* Firmware dump from device */
+ /* When the firmware hangs, the device can no longer
+ * suspend, so disable auto-suspend.
+ */
+ usb_disable_autosuspend(data->udev);
+
+ /* We need to forward the diagnostic packet to the userspace daemon
+ * for backward compatibility, so we have to clone an extra copy of
+ * the packet for the in-kernel coredump support.
+ */
+ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+ }
+
+ fallthrough;
+ case 0x05ff: /* Firmware debug logging 1 */
+ case 0x05fe: /* Firmware debug logging 2 */
+ return hci_recv_diag(hdev, skb);
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_recv_acl);
+
+static int btmtk_isopkt_pad(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (skb->len > MTK_ISO_THRESHOLD)
+ return -EINVAL;
+
+ if (skb_pad(skb, MTK_ISO_THRESHOLD - skb->len))
+ return -ENOMEM;
+
+ __skb_put(skb, MTK_ISO_THRESHOLD - skb->len);
+
+ return 0;
+}
+
+static int __set_mtk_intr_interface(struct hci_dev *hdev)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct usb_interface *intf = btmtk_data->isopkt_intf;
+ int i, err;
+
+ if (!btmtk_data->isopkt_intf)
+ return -ENODEV;
+
+ err = usb_set_interface(btmtk_data->udev, MTK_ISO_IFNUM, 1);
+ if (err < 0) {
+ bt_dev_err(hdev, "setting interface failed (%d)", -err);
+ return err;
+ }
+
+ btmtk_data->isopkt_tx_ep = NULL;
+ btmtk_data->isopkt_rx_ep = NULL;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *ep_desc;
+
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!btmtk_data->isopkt_tx_ep &&
+ usb_endpoint_is_int_out(ep_desc)) {
+ btmtk_data->isopkt_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!btmtk_data->isopkt_rx_ep &&
+ usb_endpoint_is_int_in(ep_desc)) {
+ btmtk_data->isopkt_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!btmtk_data->isopkt_tx_ep ||
+ !btmtk_data->isopkt_rx_ep) {
+ bt_dev_err(hdev, "invalid interrupt descriptors");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb,
+ usb_complete_t tx_complete)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct urb *urb;
+ unsigned int pipe;
+
+ if (!btmtk_data->isopkt_tx_ep)
+ return ERR_PTR(-ENODEV);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return ERR_PTR(-ENOMEM);
+
+ if (btmtk_isopkt_pad(hdev, skb))
+ return ERR_PTR(-EINVAL);
+
+ pipe = usb_sndintpipe(btmtk_data->udev,
+ btmtk_data->isopkt_tx_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, btmtk_data->udev, pipe,
+ skb->data, skb->len, tx_complete,
+ skb, btmtk_data->isopkt_tx_ep->bInterval);
+
+ skb->dev = (void *)hdev;
+
+ return urb;
+}
+EXPORT_SYMBOL_GPL(alloc_mtk_intr_urb);
+
+static int btmtk_recv_isopkt(struct hci_dev *hdev, void *buffer, int count)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct sk_buff *skb;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&btmtk_data->isorxlock, flags);
+ skb = btmtk_data->isopkt_skb;
+
+ while (count) {
+ int len;
+
+ if (!skb) {
+ skb = bt_skb_alloc(HCI_MAX_ISO_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+ hci_skb_expect(skb) = HCI_ISO_HDR_SIZE;
+ }
+
+ len = min_t(uint, hci_skb_expect(skb), count);
+ skb_put_data(skb, buffer, len);
+
+ count -= len;
+ buffer += len;
+ hci_skb_expect(skb) -= len;
+
+ if (skb->len == HCI_ISO_HDR_SIZE) {
+ __le16 dlen = ((struct hci_iso_hdr *)skb->data)->dlen;
+
+ /* Complete ISO header */
+ hci_skb_expect(skb) = __le16_to_cpu(dlen);
+
+ if (skb_tailroom(skb) < hci_skb_expect(skb)) {
+ kfree_skb(skb);
+ skb = NULL;
+
+ err = -EILSEQ;
+ break;
+ }
+ }
+
+ if (!hci_skb_expect(skb)) {
+ /* Complete frame */
+ hci_recv_frame(hdev, skb);
+ skb = NULL;
+ }
+ }
+
+ btmtk_data->isopkt_skb = skb;
+ spin_unlock_irqrestore(&btmtk_data->isorxlock, flags);
+
+ return err;
+}
+
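btmtk_recv_isopkt() reassembles HCI ISO frames from an arbitrary byte stream by tracking how many bytes the current frame still expects: first the fixed header, then the payload length read from that header. The same expect-counter logic in a self-contained toy (frame layout hypothetical: 2-byte id plus 2-byte little-endian payload length):

#include <stdio.h>
#include <string.h>

#define HDR_SIZE 4

static unsigned char frame[512];
static size_t have, expect = HDR_SIZE;

static void feed(const unsigned char *buf, size_t count)
{
	while (count) {
		size_t len = count < expect ? count : expect;

		memcpy(frame + have, buf, len);
		have += len;
		buf += len;
		count -= len;
		expect -= len;

		/* Header complete: learn how much payload follows */
		if (have == HDR_SIZE && expect == 0)
			expect = frame[2] | (frame[3] << 8);

		if (expect == 0) {	/* frame complete */
			printf("frame of %zu bytes\n", have);
			have = 0;
			expect = HDR_SIZE;
		}
	}
}

int main(void)
{
	const unsigned char pkt[] = { 0x01, 0x00, 0x03, 0x00, 'a', 'b', 'c' };

	feed(pkt, 3);			/* frame split across two 'URBs' */
	feed(pkt + 3, sizeof(pkt) - 3);
	return 0;
}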
+static void btmtk_intr_complete(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ int err;
+
+ BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
+ urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return;
+
+ if (hdev->suspended)
+ return;
+
+ if (urb->status == 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ if (btmtk_recv_isopkt(hdev, urb->transfer_buffer,
+ urb->actual_length) < 0) {
+ bt_dev_err(hdev, "corrupted iso packet");
+ hdev->stat.err_rx++;
+ }
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failures when usb_kill_urb() runs */
+ return;
+ }
+
+ usb_mark_last_busy(btmtk_data->udev);
+ usb_anchor_urb(urb, &btmtk_data->isopkt_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
+ if (err != -EPERM)
+ hci_cmd_sync_cancel(hdev, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btmtk_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ unsigned char *buf;
+ unsigned int pipe;
+ struct urb *urb;
+ int err, size;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!btmtk_data->isopkt_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+ size = le16_to_cpu(btmtk_data->isopkt_rx_ep->wMaxPacketSize);
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvintpipe(btmtk_data->udev,
+ btmtk_data->isopkt_rx_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, btmtk_data->udev, pipe, buf, size,
+ btmtk_intr_complete, hdev,
+ btmtk_data->isopkt_rx_ep->bInterval);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_mark_last_busy(btmtk_data->udev);
+ usb_anchor_urb(urb, &btmtk_data->isopkt_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static int btmtk_usb_isointf_init(struct hci_dev *hdev)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ u8 iso_param[2] = { 0x08, 0x01 };
+ struct sk_buff *skb;
+ int err;
+
+ spin_lock_init(&btmtk_data->isorxlock);
+
+ __set_mtk_intr_interface(hdev);
+
+ err = btmtk_submit_intr_urb(hdev, GFP_KERNEL);
+ if (err < 0) {
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+ bt_dev_err(hdev, "ISO intf not support (%d)", err);
+ return err;
+ }
+
+ skb = __hci_cmd_sync(hdev, 0xfd98, sizeof(iso_param), iso_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to apply iso setting (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+int btmtk_usb_resume(struct hci_dev *hdev)
+{
+ /* MediaTek-specific additional steps taken when the Bluetooth USB
+ * driver's resume function is called.
+ */
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+
+ /* Resubmit urb for iso data transmission */
+ if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) {
+ if (btmtk_submit_intr_urb(hdev, GFP_NOIO) < 0)
+ clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_resume);
+
+int btmtk_usb_suspend(struct hci_dev *hdev)
+{
+ /* MediaTek-specific additional steps taken when the Bluetooth USB
+ * driver's suspend function is called.
+ */
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+
+ /* Stop urb anchor for iso data transmission */
+ if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_suspend);
+
+int btmtk_usb_setup(struct hci_dev *hdev)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ const char *fwname;
+ int err, status;
+ u32 dev_id = 0;
+ char fw_bin_name[64];
+ u32 fw_version = 0, fw_flavor = 0;
+ u8 param;
+
+ calltime = ktime_get();
+
+ err = btmtk_usb_id_get(hdev, 0x80000008, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+
+ if (!dev_id || dev_id != 0x7663) {
+ err = btmtk_usb_id_get(hdev, 0x70010200, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+ err = btmtk_usb_id_get(hdev, 0x80021004, &fw_version);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw version (%d)", err);
+ return err;
+ }
+ err = btmtk_usb_id_get(hdev, 0x70010020, &fw_flavor);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
+ return err;
+ }
+ fw_flavor = (fw_flavor & 0x00000080) >> 7;
+ }
+
+ btmtk_data->dev_id = dev_id;
+
+ err = btmtk_register_coredump(hdev, btmtk_data->drv_name, fw_version);
+ if (err < 0)
+ bt_dev_err(hdev, "Failed to register coredump (%d)", err);
+
+ switch (dev_id) {
+ case 0x7663:
+ fwname = FIRMWARE_MT7663;
+ break;
+ case 0x7668:
+ fwname = FIRMWARE_MT7668;
+ break;
+ case 0x7922:
+ case 0x7925:
+ case 0x7961:
+ btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
+ fw_version, fw_flavor);
+
+ err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
+ btmtk_usb_hci_wmt_sync);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
+ return err;
+ }
+
+ /* It's the Device Endpoint Reset Option Register */
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
+ MTK_EP_RST_IN_OUT_OPT);
+ if (err < 0)
+ return err;
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ hci_set_msft_opcode(hdev, 0xFD30);
+ hci_set_aosp_capable(hdev);
+
+ /* Set up ISO interface after protocol enabled */
+ if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ if (!btmtk_usb_isointf_init(hdev))
+ set_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
+ }
+
+ goto done;
+ default:
+ bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
+ dev_id);
+ return -ENODEV;
+ }
+
+ /* Query whether the firmware is already downloaded */
+ wmt_params.op = BTMTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "firmware already downloaded");
+ goto ignore_setup_fw;
+ }
+
+ /* Set up the firmware that the device requires */
+ err = btmtk_setup_firmware(hdev, fwname,
+ btmtk_usb_hci_wmt_sync);
+ if (err < 0)
+ return err;
+
+ignore_setup_fw:
+ err = readx_poll_timeout(btmtk_usb_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* readx_poll_timeout() returns -ETIMEDOUT on timeout */
+ if (err < 0)
+ return err;
+
+ /* The other errors happen in btmtk_usb_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
+ }
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+done:
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_setup);
+
+int btmtk_usb_shutdown(struct hci_dev *hdev)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0;
+ int err;
+
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
+ /* Disable the device */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ usb_autopm_put_interface(data->intf);
+ return err;
+ }
+
+ usb_autopm_put_interface(data->intf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
+#endif
+
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index cbcdb99a22e6..5df7c3296624 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -28,6 +28,21 @@
#define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END))
#define MTK_COREDUMP_NUM 255
+/* UHW CR mapping */
+#define MTK_BT_MISC 0x70002510
+#define MTK_BT_SUBSYS_RST 0x70002610
+#define MTK_UDMA_INT_STA_BT 0x74000024
+#define MTK_UDMA_INT_STA_BT1 0x74000308
+#define MTK_BT_WDT_STATUS 0x740003A0
+#define MTK_EP_RST_OPT 0x74011890
+#define MTK_EP_RST_IN_OUT_OPT 0x00010001
+#define MTK_BT_RST_DONE 0x00000100
+#define MTK_BT_RESET_REG_CONNV3 0x70028610
+#define MTK_BT_READ_DEV_ID 0x70010200
+
+/* MediaTek ISO Interface */
+#define MTK_ISO_IFNUM 2
+
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_TEST = 0x2,
@@ -126,6 +141,14 @@ struct btmtk_hci_wmt_params {
u32 *status;
};
+enum {
+ BTMTK_TX_WAIT_VND_EVT,
+ BTMTK_FIRMWARE_LOADED,
+ BTMTK_HW_RESET_ACTIVE,
+ BTMTK_ISOPKT_OVER_INTR,
+ BTMTK_ISOPKT_RUNNING,
+};
+
typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *);
struct btmtk_coredump_info {
@@ -135,10 +158,25 @@ struct btmtk_coredump_info {
int state;
};
-struct btmediatek_data {
+struct btmtk_data {
+ const char *drv_name;
+ unsigned long flags;
u32 dev_id;
btmtk_reset_sync_func_t reset_sync;
struct btmtk_coredump_info cd_info;
+
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct usb_anchor *ctrl_anchor;
+ struct sk_buff *evt_skb;
+ struct usb_endpoint_descriptor *isopkt_tx_ep;
+ struct usb_endpoint_descriptor *isopkt_rx_ep;
+ struct usb_interface *isopkt_intf;
+ struct usb_anchor isopkt_anchor;
+ struct sk_buff *isopkt_skb;
+
+ /* spinlock for ISO data transmission */
+ spinlock_t isorxlock;
};
typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
@@ -160,6 +198,24 @@ int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
u32 fw_version);
int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb);
+
+void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver,
+ u32 fw_flavor);
+
+int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id);
+
+int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb);
+
+struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb,
+ usb_complete_t tx_complete);
+
+int btmtk_usb_resume(struct hci_dev *hdev);
+
+int btmtk_usb_suspend(struct hci_dev *hdev);
+
+int btmtk_usb_setup(struct hci_dev *hdev);
+
+int btmtk_usb_shutdown(struct hci_dev *hdev);
#else
static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
@@ -168,29 +224,73 @@ static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
-static int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
- wmt_cmd_sync_func_t wmt_cmd_sync)
+static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev,
+ const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
-static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
- wmt_cmd_sync_func_t wmt_cmd_sync)
+static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
-static void btmtk_reset_sync(struct hci_dev *hdev)
+static inline void btmtk_reset_sync(struct hci_dev *hdev)
+{
+}
+
+static inline int btmtk_register_coredump(struct hci_dev *hdev,
+ const char *name, u32 fw_version)
{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_process_coredump(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id,
+ u32 fw_ver, u32 fw_flavor)
+{
+}
+
+static inline int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev,
+ struct sk_buff *skb,
+ usb_complete_t tx_complete)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int btmtk_usb_resume(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_usb_suspend(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
}
-static int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
- u32 fw_version)
+static inline int btmtk_usb_setup(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
-static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+static inline int btmtk_usb_shutdown(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 8ded9ef8089a..fba3ab6d30a5 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -10,7 +10,7 @@
*
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
+#include <linux/usb.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
@@ -28,7 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.1"
@@ -609,11 +610,11 @@ static void btmtksdio_txrx_work(struct work_struct *work)
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */
- sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ if (bdev->func->irq_handler)
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
- pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
}
@@ -680,7 +681,7 @@ static int btmtksdio_open(struct hci_dev *hdev)
if (err < 0)
goto err_release_irq;
- /* Explitly set write-1-clear method */
+ /* Explicitly set write-1-clear method */
val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
@@ -721,6 +722,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1117,6 +1122,9 @@ static int btmtksdio_setup(struct hci_dev *hdev)
return err;
}
+ btmtk_fw_get_filename(fwname, sizeof(fwname), dev_id,
+ fw_version, 0);
+
snprintf(fwname, sizeof(fwname),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
@@ -1132,7 +1140,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
/* Enable WBS with mSBC codec */
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Enable GPIO reset mechanism */
if (bdev->reset) {
@@ -1144,9 +1152,6 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
}
- /* Valid LE States quirk for MediaTek 7921 */
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
-
break;
case 0x7663:
case 0x7668:
@@ -1248,7 +1253,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
+static void btmtksdio_reset(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
u32 status;
@@ -1264,6 +1269,12 @@ static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
sdio_claim_host(bdev->func);
+ /* set drv_pmctrl if BT is closed before doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ sdio_enable_func(bdev->func);
+ btmtksdio_drv_pmctrl(bdev);
+ }
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1279,6 +1290,12 @@ static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
goto err;
}
+ /* set fw_pmctrl back if BT is closed after doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ btmtksdio_fw_pmctrl(bdev);
+ sdio_disable_func(bdev->func);
+ }
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
@@ -1327,6 +1344,8 @@ static int btmtksdio_probe(struct sdio_func *func,
{
struct btmtksdio_dev *bdev;
struct hci_dev *hdev;
+ struct device_node *old_node;
+ bool restore_node;
int err;
bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
@@ -1357,7 +1376,7 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->open = btmtksdio_open;
hdev->close = btmtksdio_close;
- hdev->cmd_timeout = btmtksdio_cmd_timeout;
+ hdev->reset = btmtksdio_reset;
hdev->flush = btmtksdio_flush;
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
@@ -1376,7 +1395,7 @@ static int btmtksdio_probe(struct sdio_func *func,
SET_HCIDEV_DEV(hdev, &func->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
sdio_set_drvdata(func, bdev);
@@ -1395,7 +1414,7 @@ static int btmtksdio_probe(struct sdio_func *func,
if (pm_runtime_enabled(bdev->dev))
pm_runtime_disable(bdev->dev);
- /* As explaination in drivers/mmc/core/sdio_bus.c tells us:
+ /* As explanation in drivers/mmc/core/sdio_bus.c tells us:
* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -1406,17 +1425,28 @@ static int btmtksdio_probe(struct sdio_func *func,
*/
pm_runtime_put_noidle(bdev->dev);
- err = device_init_wakeup(bdev->dev, true);
+ err = devm_device_init_wakeup(bdev->dev);
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
- bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
- "mediatek,mt7921s-bluetooth");
+ restore_node = false;
+ if (!of_device_is_compatible(bdev->dev->of_node, "mediatek,mt7921s-bluetooth")) {
+ restore_node = true;
+ old_node = bdev->dev->of_node;
+ bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,mt7921s-bluetooth");
+ }
+
bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(bdev->reset))
err = PTR_ERR(bdev->reset);
+ if (restore_node) {
+ of_node_put(bdev->dev->of_node);
+ bdev->dev->of_node = old_node;
+ }
+
return err;
}
@@ -1428,11 +1458,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
+ hdev = bdev->hdev;
+
+ /* Make sure to call btmtksdio_close before removing sdio card */
+ if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ btmtksdio_close(hdev);
+
/* Be consistent the state in btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
- hdev = bdev->hdev;
-
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e6bc4a73c9fc..27aa48ff3ac2 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -8,7 +8,7 @@
*
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
@@ -22,11 +22,12 @@
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
+#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.2"
@@ -78,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;
const struct btmtkuart_data *data;
+ struct hci_uart hu;
};
#define btmtkuart_is_standalone(bdev) \
@@ -315,7 +317,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
/* Resync STP when unexpected data is being read */
if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
- bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+ bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
shdr->prefix, bdev->stp_dlen);
bdev->stp_cursor = 2;
bdev->stp_dlen = 0;
@@ -326,7 +328,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
if (count <= 0)
return NULL;
- /* Tranlate to how much the size of data H4 can handle so far */
+ /* Translate to how much the size of data H4 can handle so far */
*sz_h4 = min_t(int, count, bdev->stp_dlen);
/* Update the remaining size of STP packet */
@@ -367,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;
- bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -857,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}
bdev->hdev = hdev;
+ bdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
@@ -871,7 +874,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
SET_HCIDEV_DEV(hdev, &serdev->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 9d0c7e278114..3b1e9224e965 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NXP Bluetooth driver
- * Copyright 2023 NXP
+ * Copyright 2023-2025 NXP
*/
#include <linux/module.h>
@@ -10,17 +10,21 @@
#include <linux/serdev.h>
#include <linux/of.h>
#include <linux/skbuff.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/crc8.h>
#include <linux/crc32.h>
#include <linux/string_helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define MANUFACTURER_NXP 37
@@ -29,27 +33,40 @@
#define BTNXPUART_CHECK_BOOT_SIGNATURE 3
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
+#define BTNXPUART_FW_DOWNLOAD_ABORT 6
+#define BTNXPUART_FW_DUMP_IN_PROGRESS 7
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
-#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin"
-#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin"
-#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin"
-#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin"
-#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se"
-#define FIRMWARE_IW624 "nxp/uartiw624_bt.bin"
-#define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se"
-#define FIRMWARE_AW693 "nxp/uartaw693_bt.bin"
-#define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se"
-#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin"
+#define FIRMWARE_W8987 "uart8987_bt.bin"
+#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin"
+#define FIRMWARE_W8997 "uart8997_bt_v4.bin"
+#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin"
+#define FIRMWARE_W9098 "uart9098_bt_v1.bin"
+#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin"
+#define FIRMWARE_IW416 "uartiw416_bt.bin"
+#define FIRMWARE_IW416_OLD "uartiw416_bt_v0.bin"
+#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se"
+#define FIRMWARE_IW610 "uartspi_iw610.bin"
+#define FIRMWARE_SECURE_IW610 "uartspi_iw610.bin.se"
+#define FIRMWARE_IW624 "uartiw624_bt.bin"
+#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se"
+#define FIRMWARE_AW693 "uartaw693_bt.bin"
+#define FIRMWARE_SECURE_AW693 "uartaw693_bt.bin.se"
+#define FIRMWARE_AW693_A1 "uartaw693_bt_v1.bin"
+#define FIRMWARE_SECURE_AW693_A1 "uartaw693_bt_v1.bin.se"
+#define FIRMWARE_HELPER "helper_uart_3000000.bin"
#define CHIP_ID_W9098 0x5c03
#define CHIP_ID_IW416 0x7201
#define CHIP_ID_IW612 0x7601
#define CHIP_ID_IW624a 0x8000
#define CHIP_ID_IW624c 0x8001
-#define CHIP_ID_AW693 0x8200
+#define CHIP_ID_AW693a0 0x8200
+#define CHIP_ID_AW693a1 0x8201
+#define CHIP_ID_IW610a0 0x8800
+#define CHIP_ID_IW610a1 0x8801
#define FW_SECURE_MASK 0xc0
#define FW_OPEN 0x00
@@ -58,7 +75,8 @@
#define FW_AUTH_ENC 0xc0
#define HCI_NXP_PRI_BAUDRATE 115200
-#define HCI_NXP_SEC_BAUDRATE 3000000
+#define HCI_NXP_SEC_BAUDRATE_3M 3000000
+#define HCI_NXP_SEC_BAUDRATE_4M 4000000
#define MAX_FW_FILE_NAME_LEN 50
@@ -70,6 +88,7 @@
#define WAKEUP_METHOD_BREAK 1
#define WAKEUP_METHOD_EXT_BREAK 2
#define WAKEUP_METHOD_RTS 3
+#define WAKEUP_METHOD_GPIO 4
#define WAKEUP_METHOD_INVALID 0xff
/* power save mode status */
@@ -84,14 +103,19 @@
#define PS_STATE_AWAKE 0
#define PS_STATE_SLEEP 1
-/* Bluetooth vendor command : Sleep mode */
+/* NXP vendor commands. Refer to user manual UM11628 on nxp.com */
+/* Set custom BD Address */
+#define HCI_NXP_SET_BD_ADDR 0xfc22
+/* Set Auto-Sleep mode */
#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23
-/* Bluetooth vendor command : Wakeup method */
+/* Set Wakeup method */
#define HCI_NXP_WAKEUP_METHOD 0xfc53
-/* Bluetooth vendor command : Set operational baudrate */
+/* Set operational baudrate */
#define HCI_NXP_SET_OPER_SPEED 0xfc09
-/* Bluetooth vendor command: Independent Reset */
+/* Independent Reset (Soft Reset) */
#define HCI_NXP_IND_RESET 0xfcfc
+/* Bluetooth vendor command: Trigger FW dump */
+#define HCI_NXP_TRIGGER_DUMP 0xfe91
/* Bluetooth Power State : Vendor cmd params */
#define BT_PS_ENABLE 0x02
@@ -123,6 +147,9 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
+ bool wakeup_source;
+ struct gpio_desc *h2c_ps_gpio;
+ s32 irq_handler;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@@ -144,6 +171,13 @@ struct psmode_cmd_payload {
struct btnxpuart_data {
const char *helper_fw_name;
const char *fw_name;
+ const char *fw_name_old;
+};
+
+enum bootloader_param_change {
+ not_changed,
+ cmd_sent,
+ changed
};
struct btnxpuart_dev {
@@ -159,7 +193,9 @@ struct btnxpuart_dev {
u8 fw_name[MAX_FW_FILE_NAME_LEN];
u32 fw_dnld_v1_offset;
u32 fw_v1_sent_bytes;
+ u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
+ u32 fw_v3_prev_sent;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
wait_queue_head_t fw_dnld_done_wait_q;
@@ -168,12 +204,15 @@ struct btnxpuart_dev {
u32 new_baudrate;
u32 current_baudrate;
u32 fw_init_baudrate;
- bool timeout_changed;
- bool baudrate_changed;
+ u32 secondary_baudrate;
+ enum bootloader_param_change timeout_changed;
+ enum bootloader_param_change baudrate_changed;
bool helper_downloaded;
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
+ struct reset_control *pdn;
+ struct hci_uart hu;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -187,6 +226,12 @@ struct btnxpuart_dev {
#define NXP_NAK_V3 0x7b
#define NXP_CRC_ERROR_V3 0x7c
+/* Bootloader signature error codes: refer to AN12820 from nxp.com */
+#define NXP_CRC_RX_ERROR BIT(0) /* CRC error in previous packet */
+#define NXP_ACK_RX_TIMEOUT BIT(2) /* ACK not received from host */
+#define NXP_HDR_RX_TIMEOUT BIT(3) /* FW Header chunk not received */
+#define NXP_DATA_RX_TIMEOUT BIT(4) /* FW Data chunk not received */
+
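Since the signature error byte is a bitmask, several of these conditions can be reported at once; decoding is a matter of testing each bit. A small illustrative decoder (userspace BIT() stand-in included):

#include <stdio.h>

#define BIT(n)	(1u << (n))

#define NXP_CRC_RX_ERROR	BIT(0)
#define NXP_ACK_RX_TIMEOUT	BIT(2)
#define NXP_HDR_RX_TIMEOUT	BIT(3)
#define NXP_DATA_RX_TIMEOUT	BIT(4)

/* Print every error condition flagged in a signature error byte */
static void decode_bootloader_err(unsigned int err)
{
	if (err & NXP_CRC_RX_ERROR)
		printf("CRC error in previous packet\n");
	if (err & NXP_ACK_RX_TIMEOUT)
		printf("ACK not received from host\n");
	if (err & NXP_HDR_RX_TIMEOUT)
		printf("FW header chunk not received\n");
	if (err & NXP_DATA_RX_TIMEOUT)
		printf("FW data chunk not received\n");
}

int main(void)
{
	decode_bootloader_err(NXP_CRC_RX_ERROR | NXP_ACK_RX_TIMEOUT);
	return 0;
}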
#define HDR_LEN 16
#define NXP_RECV_CHIP_VER_V1 \
@@ -277,26 +322,75 @@ struct nxp_bootloader_cmd {
__be32 crc;
} __packed;
+struct nxp_v3_rx_timeout_nak {
+ u8 nak;
+ __le32 offset;
+ u8 crc;
+} __packed;
+
+union nxp_v3_rx_timeout_nak_u {
+ struct nxp_v3_rx_timeout_nak pkt;
+ u8 buf[6];
+};
+
+struct nxp_v3_crc_nak {
+ u8 nak;
+ u8 crc;
+} __packed;
+
+union nxp_v3_crc_nak_u {
+ struct nxp_v3_crc_nak pkt;
+ u8 buf[2];
+};
+
+/* FW dump */
+#define NXP_FW_DUMP_SIZE (1024 * 1000)
+
+struct nxp_fw_dump_hdr {
+ __le16 seq_num;
+ __le16 reserved;
+ __le16 buf_type;
+ __le16 buf_len;
+};
+
+union nxp_set_bd_addr_payload {
+ struct {
+ u8 param_id;
+ u8 param_len;
+ u8 param[6];
+ } __packed data;
+ u8 buf[8];
+};
+
static u8 crc8_table[CRC8_TABLE_SIZE];
/* Default configurations */
#define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK
-#define DEFAULT_PS_MODE PS_MODE_DISABLE
+#define DEFAULT_PS_MODE PS_MODE_ENABLE
#define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE
static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
u32 plen,
- void *param)
+ void *param,
+ bool resp)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
/* set flag to prevent nxp_enqueue from parsing values from this command and
* calling hci_cmd_sync_queue() again.
*/
psdata->driver_sent_cmd = true;
- skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ if (resp) {
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ } else {
+ __hci_cmd_send(hdev, opcode, plen, param);
+ /* Allow command to be sent before tx_work is cancelled
+ * by btnxpuart_flush()
+ */
+ msleep(20);
+ }
psdata->driver_sent_cmd = false;
return skb;
@@ -328,14 +422,14 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
struct ps_data *psdata = &nxpdev->psdata;
flush_work(&psdata->work);
- del_timer_sync(&psdata->ps_timer);
+ timer_shutdown_sync(&psdata->ps_timer);
}
static void ps_control(struct hci_dev *hdev, u8 ps_state)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- int status;
+ int status = 0;
if (psdata->ps_state == ps_state ||
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
@@ -343,6 +437,14 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
+ case WAKEUP_METHOD_GPIO:
+ if (ps_state == PS_STATE_AWAKE)
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
+ else
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 1);
+ bt_dev_dbg(hdev, "Set h2c_ps_gpio: %s",
+ str_high_low(ps_state == PS_STATE_SLEEP));
+ break;
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0);
@@ -380,7 +482,7 @@ static void ps_work_func(struct work_struct *work)
static void ps_timeout_func(struct timer_list *t)
{
- struct ps_data *data = from_timer(data, t, ps_timer);
+ struct ps_data *data = timer_container_of(data, t, ps_timer);
struct hci_dev *hdev = data->hdev;
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -392,15 +494,72 @@ static void ps_timeout_func(struct timer_list *t)
}
}
-static void ps_setup(struct hci_dev *hdev)
+static irqreturn_t ps_host_wakeup_irq_handler(int irq, void *priv)
+{
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)priv;
+
+ bt_dev_dbg(nxpdev->hdev, "Host wakeup interrupt");
+ return IRQ_HANDLED;
+}
+
+static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
+ int ret;
+
+ /* Out-Of-Band Device Wakeup */
+ psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(psdata->h2c_ps_gpio)) {
+ bt_dev_err(hdev, "Error fetching device-wakeup-gpios: %ld",
+ PTR_ERR(psdata->h2c_ps_gpio));
+ return PTR_ERR(psdata->h2c_ps_gpio);
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
+ psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
+ } else if (!psdata->h2c_ps_gpio) {
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup-gpios");
+ psdata->h2c_wakeup_gpio = 0xff;
+ }
+
+ /* Out-Of-Band Host Wakeup */
+ if (of_property_read_bool(serdev->dev.of_node, "wakeup-source")) {
+ psdata->irq_handler = of_irq_get_byname(serdev->dev.of_node, "wakeup");
+ bt_dev_info(nxpdev->hdev, "irq_handler: %d", psdata->irq_handler);
+ if (psdata->irq_handler > 0)
+ psdata->wakeup_source = true;
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio)) {
+ psdata->c2h_wakeup_gpio = 0xff;
+ if (psdata->wakeup_source) {
+ bt_dev_warn(hdev, "host wakeup interrupt without nxp,wakeout-pin");
+ psdata->wakeup_source = false;
+ }
+ } else if (!psdata->wakeup_source) {
+ bt_dev_warn(hdev, "nxp,wakeout-pin property without host wakeup interrupt");
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
+
+ if (psdata->wakeup_source) {
+ ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
+ NULL, ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&serdev->dev), nxpdev);
+ if (ret)
+ bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
+ disable_irq(psdata->irq_handler);
+ device_init_wakeup(&serdev->dev, true);
+ }
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
+
+ return 0;
}
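For reference, a hypothetical devicetree fragment exercising every property parsed by ps_setup() above; the node name, compatible string and values are illustrative only, not taken from this patch:

	/*
	 * bluetooth {
	 *	compatible = "nxp,88w8987-bt";
	 *	device-wakeup-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
	 *	nxp,wakein-pin = /bits/ 8 <18>;
	 *	nxp,wakeout-pin = /bits/ 8 <19>;
	 *	wakeup-source;
	 *	interrupts-extended = <&gpio2 4 IRQ_TYPE_EDGE_FALLING>;
	 *	interrupt-names = "wakeup";
	 * };
	 */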
static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
@@ -420,6 +579,23 @@ static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
return false;
}
+static void ps_cleanup(struct btnxpuart_dev *nxpdev)
+{
+ struct ps_data *psdata = &nxpdev->psdata;
+ u8 ps_state;
+
+ mutex_lock(&psdata->ps_lock);
+ ps_state = psdata->ps_state;
+ mutex_unlock(&psdata->ps_lock);
+
+ if (ps_state != PS_STATE_AWAKE)
+ ps_control(psdata->hdev, PS_STATE_AWAKE);
+
+ ps_cancel_timer(nxpdev);
+ cancel_work_sync(&psdata->work);
+ mutex_destroy(&psdata->ps_lock);
+}
+
static int send_ps_cmd(struct hci_dev *hdev, void *data)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -434,7 +610,8 @@ static int send_ps_cmd(struct hci_dev *hdev, void *data)
pcmd.ps_cmd = BT_PS_DISABLE;
pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval);
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -468,7 +645,12 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
+ pcmd.h2c_wakeup_gpio = 0xff;
switch (psdata->h2c_wakeupmode) {
+ case WAKEUP_METHOD_GPIO:
+ pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO;
+ pcmd.h2c_wakeup_gpio = psdata->h2c_wakeup_gpio;
+ break;
case WAKEUP_METHOD_DTR:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
break;
@@ -477,9 +659,9 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK;
break;
}
- pcmd.h2c_wakeup_gpio = 0xff;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -503,6 +685,7 @@ static void ps_init(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
+ u8 default_h2c_wakeup_mode = DEFAULT_H2C_WAKEUP_MODE;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS);
usleep_range(5000, 10000);
@@ -510,12 +693,24 @@ static void ps_init(struct hci_dev *hdev)
usleep_range(5000, 10000);
psdata->ps_state = PS_STATE_AWAKE;
- psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
+
+ if (psdata->c2h_wakeup_gpio != 0xff)
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
+ else
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
+ if (psdata->h2c_ps_gpio)
+ default_h2c_wakeup_mode = WAKEUP_METHOD_GPIO;
+
psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS;
- switch (DEFAULT_H2C_WAKEUP_MODE) {
+
+ switch (default_h2c_wakeup_mode) {
+ case WAKEUP_METHOD_GPIO:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
+ usleep_range(5000, 10000);
+ break;
case WAKEUP_METHOD_DTR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR);
@@ -533,11 +728,6 @@ static void ps_init(struct hci_dev *hdev)
psdata->cur_psmode = PS_MODE_DISABLE;
psdata->target_ps_mode = DEFAULT_PS_MODE;
-
- if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
- hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
- if (psdata->cur_psmode != psdata->target_ps_mode)
- hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
}
/* NXP Firmware Download Feature */
@@ -550,9 +740,10 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->fw_v1_sent_bytes = 0;
nxpdev->fw_v1_expected_len = HDR_LEN;
nxpdev->boot_reg_offset = 0;
+ nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
- nxpdev->baudrate_changed = false;
- nxpdev->timeout_changed = false;
+ nxpdev->baudrate_changed = not_changed;
+ nxpdev->timeout_changed = not_changed;
nxpdev->helper_downloaded = false;
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
@@ -564,14 +755,25 @@ static int nxp_download_firmware(struct hci_dev *hdev)
!test_bit(BTNXPUART_FW_DOWNLOADING,
&nxpdev->tx_state),
msecs_to_jiffies(60000));
+
+ if (nxpdev->fw && strlen(nxpdev->fw_name)) {
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ }
+
if (err == 0) {
- bt_dev_err(hdev, "FW Download Timeout.");
+ bt_dev_err(hdev, "FW Download Timeout. offset: %d",
+ nxpdev->fw_dnld_v1_offset ?
+ nxpdev->fw_dnld_v1_offset :
+ nxpdev->fw_dnld_v3_offset);
return -ETIMEDOUT;
}
+ if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) {
+ bt_dev_err(hdev, "FW Download Aborted");
+ return -EINTR;
+ }
serdev_device_set_flow_control(nxpdev->serdev, true);
- release_firmware(nxpdev->fw);
- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
/* Allow the downloaded FW to initialize */
msleep(1200);
@@ -617,7 +819,10 @@ static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len)
nxpdev->fw_v3_offset_correction += req_len;
} else if (req_len == sizeof(uart_config)) {
uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr);
- uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
+ if (nxpdev->new_baudrate == HCI_NXP_SEC_BAUDRATE_4M)
+ uart_config.clkdiv.value = __cpu_to_le32(0x01000000);
+ else
+ uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr);
uart_config.uartdiv.value = __cpu_to_le32(1);
uart_config.mcr.address = __cpu_to_le32(uartmcraddr);
@@ -672,6 +877,16 @@ static bool is_fw_downloading(struct btnxpuart_dev *nxpdev)
return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+static bool ind_reset_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state);
+}
+
+static bool fw_dump_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
+}
+
static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
{
if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) {
@@ -682,19 +897,30 @@ static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
return is_fw_downloading(nxpdev);
}
-static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name)
+static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name,
+ const char *fw_name_old)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ const char *fw_name_dt;
int err = 0;
if (!fw_name)
return -ENOENT;
if (!strlen(nxpdev->fw_name)) {
- snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name);
+ if (strcmp(fw_name, FIRMWARE_HELPER) &&
+ !device_property_read_string(&nxpdev->serdev->dev,
+ "firmware-name",
+ &fw_name_dt))
+ fw_name = fw_name_dt;
+ snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name);
+ err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ if (err < 0 && fw_name_old) {
+ snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name_old);
+ err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ }
- bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name);
- err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ bt_dev_info(hdev, "Request Firmware: %s", nxpdev->fw_name);
if (err < 0) {
bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@@ -754,40 +980,40 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
len = __le16_to_cpu(req->len);
if (!nxp_data->helper_fw_name) {
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev,
- len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = changed;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev,
- len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = changed;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
}
if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) {
- if (nxp_request_firmware(hdev, nxp_data->fw_name))
+ if (nxp_request_firmware(hdev, nxp_data->fw_name, nxp_data->fw_name_old))
goto free_skb;
} else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
- if (nxp_request_firmware(hdev, nxp_data->helper_fw_name))
+ if (nxp_request_firmware(hdev, nxp_data->helper_fw_name, NULL))
goto free_skb;
}
if (!len) {
- bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
+ bt_dev_info(hdev, "FW Download Complete: %zu bytes",
nxpdev->fw->size);
if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
nxpdev->helper_downloaded = true;
serdev_device_wait_until_sent(nxpdev->serdev, 0);
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ HCI_NXP_SEC_BAUDRATE_3M);
serdev_device_set_flow_control(nxpdev->serdev, true);
} else {
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@@ -863,7 +1089,7 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
- case CHIP_ID_AW693:
+ case CHIP_ID_AW693a0:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
fw_name = FIRMWARE_AW693;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
@@ -871,6 +1097,23 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
+ case CHIP_ID_AW693a1:
+ if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
+ fw_name = FIRMWARE_AW693_A1;
+ else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
+ fw_name = FIRMWARE_SECURE_AW693_A1;
+ else
+ bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
+ break;
+ case CHIP_ID_IW610a0:
+ case CHIP_ID_IW610a1:
+ if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
+ fw_name = FIRMWARE_IW610;
+ else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
+ fw_name = FIRMWARE_SECURE_IW610;
+ else
+ bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
+ break;
default:
bt_dev_err(hdev, "Unknown chip signature %04x", chipid);
break;
@@ -878,10 +1121,28 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
return fw_name;
}
+static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
+ u8 loader_ver)
+{
+ char *fw_name_old = NULL;
+
+ switch (chipid) {
+ case CHIP_ID_W9098:
+ fw_name_old = FIRMWARE_W9098_OLD;
+ break;
+ case CHIP_ID_IW416:
+ fw_name_old = FIRMWARE_IW416_OLD;
+ break;
+ }
+ return fw_name_old;
+}
+
static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req));
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ const char *fw_name;
+ const char *fw_name_old;
u16 chip_id;
u8 loader_ver;
@@ -890,8 +1151,10 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
chip_id = le16_to_cpu(req->chip_id);
loader_ver = req->loader_ver;
- if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev,
- chip_id, loader_ver)))
+ bt_dev_info(hdev, "ChipID: %04x, Version: %d", chip_id, loader_ver);
+ fw_name = nxp_get_fw_name_from_chipid(hdev, chip_id, loader_ver);
+ fw_name_old = nxp_get_old_fw_name_from_chipid(hdev, chip_id, loader_ver);
+ if (!nxp_request_firmware(hdev, fw_name, fw_name_old))
nxp_send_ack(NXP_ACK_V3, hdev);
free_skb:
@@ -899,11 +1162,40 @@ free_skb:
return 0;
}
+static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __u32 offset = __le32_to_cpu(req->offset);
+ __u16 err = __le16_to_cpu(req->error);
+ union nxp_v3_rx_timeout_nak_u timeout_nak_buf;
+ union nxp_v3_crc_nak_u crc_nak_buf;
+
+ if (err & NXP_CRC_RX_ERROR) {
+ crc_nak_buf.pkt.nak = NXP_CRC_ERROR_V3;
+ crc_nak_buf.pkt.crc = crc8(crc8_table, crc_nak_buf.buf,
+ sizeof(crc_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, crc_nak_buf.buf,
+ sizeof(crc_nak_buf));
+ } else if (err & NXP_ACK_RX_TIMEOUT ||
+ err & NXP_HDR_RX_TIMEOUT ||
+ err & NXP_DATA_RX_TIMEOUT) {
+ timeout_nak_buf.pkt.nak = NXP_NAK_V3;
+ timeout_nak_buf.pkt.offset = __cpu_to_le32(offset);
+ timeout_nak_buf.pkt.crc = crc8(crc8_table, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf));
+ } else {
+ bt_dev_err(hdev, "Unknown bootloader error code: %d", err);
+ }
+}
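The two NAK unions above are declared elsewhere in the driver; from the field accesses and the crc8() call over all but the last byte, a plausible layout looks like this (a sketch, field order and sizes assumed):

	union nxp_v3_rx_timeout_nak_u {
		struct {
			u8 nak;		/* NXP_NAK_V3 */
			__le32 offset;	/* offset to resume the download from */
			u8 crc;		/* crc8 over the preceding five bytes */
		} __packed pkt;
		u8 buf[6];
	};

	union nxp_v3_crc_nak_u {
		struct {
			u8 nak;		/* NXP_CRC_ERROR_V3 */
			u8 crc;		/* crc8 over the first byte */
		} __packed pkt;
		u8 buf[2];
	};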
+
static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct v3_data_req *req;
- __u16 len;
+ __u16 len = 0;
+ __u16 err = 0;
__u32 offset;
if (!process_boot_signature(nxpdev))
@@ -913,36 +1205,56 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
if (!req || !nxpdev->fw)
goto free_skb;
- nxp_send_ack(NXP_ACK_V3, hdev);
+ err = __le16_to_cpu(req->error);
+
+ if (!err) {
+ nxp_send_ack(NXP_ACK_V3, hdev);
+ if (nxpdev->timeout_changed == cmd_sent)
+ nxpdev->timeout_changed = changed;
+ if (nxpdev->baudrate_changed == cmd_sent)
+ nxpdev->baudrate_changed = changed;
+ } else {
+ nxp_handle_fw_download_error(hdev, req);
+ if (nxpdev->timeout_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->timeout_changed = not_changed;
+ }
+ if (nxpdev->baudrate_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->baudrate_changed = not_changed;
+ }
+ goto free_skb;
+ }
len = __le16_to_cpu(req->len);
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = cmd_sent;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = cmd_sent;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
if (req->len == 0) {
- bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
+ bt_dev_info(hdev, "FW Download Complete: %zu bytes",
nxpdev->fw->size);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
goto free_skb;
}
- if (req->error)
- bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip",
- req->error);
offset = __le32_to_cpu(req->offset);
if (offset < nxpdev->fw_v3_offset_correction) {
@@ -954,10 +1266,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
goto free_skb;
}
- serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset -
- nxpdev->fw_v3_offset_correction, len);
+ nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction;
+ serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data +
+ nxpdev->fw_dnld_v3_offset, len);
free_skb:
+ nxpdev->fw_v3_prev_sent = len;
kfree_skb(skb);
return 0;
}
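not_changed, cmd_sent and changed are referenced but not declared in this diff; an enum along these lines is assumed (name hypothetical). The transitions implemented above are: not_changed -> cmd_sent when the timeout/baudrate request is written, cmd_sent -> changed on a clean ACK, and cmd_sent -> not_changed on NXP_CRC_RX_ERROR, with fw_v3_offset_correction rolled back so the request is re-sent.

	enum nxp_cfg_state {
		not_changed,	/* request not sent yet */
		cmd_sent,	/* request written to the chip, ACK pending */
		changed,	/* chip acknowledged the request */
	};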
@@ -973,7 +1287,8 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
if (!psdata)
return 0;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4,
+ (u8 *)&new_baudrate, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -996,7 +1311,7 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev)
{
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
+ if (ind_reset_in_progress(nxpdev))
serdev_device_set_flow_control(nxpdev->serdev, false);
else
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1025,10 +1340,111 @@ static int nxp_set_ind_reset(struct hci_dev *hdev, void *data)
return hci_recv_frame(hdev, skb);
}
+/* Firmware dump */
+static void nxp_coredump(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 pcmd = 2;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd, true);
+ if (IS_ERR(skb))
+ bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
+ else
+ kfree_skb(skb);
+}
+
+static void nxp_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Nothing to be added in FW dump header */
+}
+
+static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_acl_hdr *acl_hdr = (struct hci_acl_hdr *)skb_pull_data(skb,
+ sizeof(*acl_hdr));
+ struct nxp_fw_dump_hdr *fw_dump_hdr = (struct nxp_fw_dump_hdr *)skb->data;
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __u16 seq_num = __le16_to_cpu(fw_dump_hdr->seq_num);
+ __u16 buf_len = __le16_to_cpu(fw_dump_hdr->buf_len);
+ int err;
+
+ if (seq_num == 0x0001) {
+ if (test_and_set_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state)) {
+ bt_dev_err(hdev, "FW dump already in progress");
+ goto free_skb;
+ }
+ bt_dev_warn(hdev, "==== Start FW dump ===");
+ err = hci_devcd_init(hdev, NXP_FW_DUMP_SIZE);
+ if (err < 0)
+ goto free_skb;
+
+ schedule_delayed_work(&hdev->dump.dump_timeout,
+ msecs_to_jiffies(20000));
+ }
+
+ err = hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
+ if (err < 0)
+ goto free_skb;
+
+ if (buf_len == 0) {
+ bt_dev_warn(hdev, "==== FW dump complete ===");
+ hci_devcd_complete(hdev);
+ nxp_set_ind_reset(hdev, NULL);
+ }
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
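struct nxp_fw_dump_hdr is defined outside this hunk; the two fields read above suggest a minimal layout like the following (a sketch; note the whole chunk, header included, is what gets appended to the devcoredump):

	struct nxp_fw_dump_hdr {
		__le16 seq_num;	/* 1-based chunk counter; 0x0001 opens a dump */
		__le16 buf_len;	/* payload length; 0 marks the final chunk */
		u8 buf[];	/* dump payload */
	} __packed;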
+
+static int nxp_recv_acl_pkt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ /* FW dump chunks are ACL packets with conn handle 0xfff */
+ if ((handle & 0x0FFF) == 0xFFF)
+ return nxp_process_fw_dump(hdev, skb);
+ else
+ return hci_recv_frame(hdev, skb);
+}
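The 0x0FFF mask strips the packet-boundary/broadcast flags that HCI packs into bits 12-15 of the ACL handle field, so the reserved handle matches regardless of the flag bits; for example:

	/* handle field 0x2FFF: flags 0x2 in bits 12-15, connection handle
	 * 0xFFF in bits 0-11 -> (0x2FFF & 0x0FFF) == 0xFFF, routed to
	 * nxp_process_fw_dump(); an ordinary handle such as 0x2001 falls
	 * through to hci_recv_frame().
	 */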
+
+static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ union nxp_set_bd_addr_payload pcmd;
+ int err;
+
+ pcmd.data.param_id = 0xfe;
+ pcmd.data.param_len = 6;
+ memcpy(pcmd.data.param, bdaddr, 6);
+
+ /* The BD address can be assigned only after the first reset command. */
+ err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev,
+ "Reset before setting local-bd-addr failed (%d)",
+ err);
+ return err;
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_NXP_SET_BD_ADDR, sizeof(pcmd),
+ pcmd.buf, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Changing device address failed (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
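union nxp_set_bd_addr_payload is also declared elsewhere; the assignments above imply roughly this layout (a sketch):

	union nxp_set_bd_addr_payload {
		struct {
			u8 param_id;	/* 0xfe selects the BD address parameter */
			u8 param_len;	/* 6 */
			u8 param[6];	/* bdaddr_t, little endian */
		} __packed data;
		u8 buf[8];
	};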
+
/* NXP protocol */
static int nxp_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
int err = 0;
if (nxp_check_boot_sign(nxpdev)) {
@@ -1037,18 +1453,19 @@ static int nxp_setup(struct hci_dev *hdev)
if (err < 0)
return err;
} else {
- bt_dev_dbg(hdev, "FW already running.");
+ bt_dev_info(hdev, "FW already running.");
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_READY");
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+
serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
- if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
- nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
- hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL);
- }
-
ps_init(hdev);
if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
@@ -1057,6 +1474,22 @@ static int nxp_setup(struct hci_dev *hdev)
return 0;
}
+static int nxp_post_init(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (nxpdev->current_baudrate != nxpdev->secondary_baudrate) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+ if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
+ send_wakeup_method_cmd(hdev, NULL);
+ if (psdata->cur_psmode != psdata->target_ps_mode)
+ send_ps_cmd(hdev, NULL);
+ return 0;
+}
+
static void nxp_hw_err(struct hci_dev *hdev, u8 code)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1075,25 +1508,47 @@ static int nxp_shutdown(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct sk_buff *skb;
- u8 *status;
u8 pcmd = 0;
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) {
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- status = skb_pull_data(skb, 1);
- if (status) {
- serdev_device_set_flow_control(nxpdev->serdev, false);
- set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
- }
- kfree_skb(skb);
+ if (ind_reset_in_progress(nxpdev)) {
+ if (test_and_clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS,
+ &nxpdev->tx_state))
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, false);
+ else
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, true);
+ serdev_device_set_flow_control(nxpdev->serdev, false);
+ set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ /* The HCI_NXP_IND_RESET command may not return any response */
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
}
return 0;
}
+static bool nxp_wakeup(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (psdata->c2h_wakeupmode != BT_HOST_WAKEUP_METHOD_NONE)
+ return true;
+
+ return false;
+}
+
+static void nxp_reset(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ if (!ind_reset_in_progress(nxpdev) && !fw_dump_in_progress(nxpdev)) {
+ bt_dev_dbg(hdev, "CMD Timeout detected. Resetting.");
+ nxp_set_ind_reset(hdev, NULL);
+ }
+}
+
static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1114,6 +1569,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
struct wakeup_cmd_payload wakeup_parm;
__le32 baudrate_parm;
+ if (fw_dump_in_progress(nxpdev))
+ return -EBUSY;
+
/* if vendor commands are received from user space (e.g. hcitool), update
* driver flags accordingly and ask driver to re-send the command to FW.
* In case the payload for any command does not match expected payload
@@ -1145,6 +1603,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio;
psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio;
switch (wakeup_parm.h2c_wakeupmode) {
+ case BT_CTRL_WAKEUP_METHOD_GPIO:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
+ break;
case BT_CTRL_WAKEUP_METHOD_DSR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
break;
@@ -1211,7 +1672,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
skb_pull(skb, len);
if (skb->len > 0) {
skb_queue_head(&nxpdev->txq, skb);
- break;
+ continue;
}
switch (hci_skb_pkt_type(skb)) {
@@ -1250,11 +1711,12 @@ static int btnxpuart_close(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
- ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
skb_queue_purge(&nxpdev->txq);
- kfree_skb(nxpdev->rx_skb);
- nxpdev->rx_skb = NULL;
+ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+ }
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
@@ -1269,16 +1731,19 @@ static int btnxpuart_flush(struct hci_dev *hdev)
cancel_work_sync(&nxpdev->tx_work);
- kfree_skb(nxpdev->rx_skb);
- nxpdev->rx_skb = NULL;
+ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+ }
return 0;
}
static const struct h4_recv_pkt nxp_recv_pkts[] = {
- { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_ACL, .recv = nxp_recv_acl_pkt },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 },
{ NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 },
{ NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 },
@@ -1292,16 +1757,18 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
ps_start_timer(nxpdev);
- nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
/* Safe to ignore out-of-sync bootloader signatures */
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
return count;
}
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
nxpdev->hdev->stat.byte_rx += count;
return count;
}
@@ -1316,10 +1783,41 @@ static const struct serdev_device_ops btnxpuart_client_ops = {
.write_wakeup = btnxpuart_write_wakeup,
};
+static void nxp_coredump_notify(struct hci_dev *hdev, int state)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
+
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ switch (state) {
+ case HCI_DEVCOREDUMP_ACTIVE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_ACTIVE");
+ break;
+ case HCI_DEVCOREDUMP_DONE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_DONE");
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_TIMEOUT");
+ break;
+ default:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_STATE_%d",
+ state);
+ break;
+ }
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
static int nxp_serdev_probe(struct serdev_device *serdev)
{
struct hci_dev *hdev;
struct btnxpuart_dev *nxpdev;
+ bdaddr_t ba = {0};
+ int err;
nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
if (!nxpdev)
@@ -1343,10 +1841,31 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (!nxpdev->fw_init_baudrate)
nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE;
+ device_property_read_u32(&nxpdev->serdev->dev, "max-speed",
+ &nxpdev->secondary_baudrate);
+ if (!nxpdev->secondary_baudrate ||
+ (nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_3M &&
+ nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_4M)) {
+ if (nxpdev->secondary_baudrate)
+ dev_err(&serdev->dev,
+ "Invalid max-speed. Using default 3000000.");
+ nxpdev->secondary_baudrate = HCI_NXP_SEC_BAUDRATE_3M;
+ }
+
set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
crc8_populate_msb(crc8_table, POLYNOMIAL8);
+ nxpdev->pdn = devm_reset_control_get_optional_shared(&serdev->dev, NULL);
+ if (IS_ERR(nxpdev->pdn))
+ return PTR_ERR(nxpdev->pdn);
+
+ err = devm_regulator_get_enable(&serdev->dev, "vcc");
+ if (err) {
+ dev_err(&serdev->dev, "Failed to enable vcc regulator\n");
+ return err;
+ }
+
/* Initialize and register HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
@@ -1354,7 +1873,10 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
}
+ reset_control_deassert(nxpdev->pdn);
+
nxpdev->hdev = hdev;
+ nxpdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
@@ -1364,20 +1886,38 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
hdev->close = btnxpuart_close;
hdev->flush = btnxpuart_flush;
hdev->setup = nxp_setup;
+ hdev->post_init = nxp_post_init;
hdev->send = nxp_enqueue;
hdev->hw_error = nxp_hw_err;
hdev->shutdown = nxp_shutdown;
+ hdev->wakeup = nxp_wakeup;
+ hdev->reset = nxp_reset;
+ hdev->set_bdaddr = nxp_set_bdaddr;
SET_HCIDEV_DEV(hdev, &serdev->dev);
+ device_property_read_u8_array(&nxpdev->serdev->dev,
+ "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (bacmp(&ba, BDADDR_ANY))
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
+
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
- hci_free_dev(hdev);
- return -ENODEV;
+ goto probe_fail;
}
- ps_setup(hdev);
+ if (ps_setup(hdev))
+ goto probe_fail;
+
+ hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr,
+ nxp_coredump_notify);
return 0;
+
+probe_fail:
+ reset_control_assert(nxpdev->pdn);
+ hci_free_dev(hdev);
+ return -ENODEV;
}
static void nxp_serdev_remove(struct serdev_device *serdev)
@@ -1385,28 +1925,79 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
struct hci_dev *hdev = nxpdev->hdev;
- /* Restore FW baudrate to fw_init_baudrate if changed.
- * This will ensure FW baudrate is in sync with
- * driver baudrate in case this driver is re-inserted.
- */
- if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
- nxp_set_baudrate_cmd(hdev, NULL);
+ if (is_fw_downloading(nxpdev)) {
+ set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state);
+ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
+ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
+ } else {
+ /* Restore FW baudrate to fw_init_baudrate if changed.
+ * This will ensure FW baudrate is in sync with
+ * driver baudrate in case this driver is re-inserted.
+ */
+ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
}
- ps_cancel_timer(nxpdev);
+ ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
+ reset_control_assert(nxpdev->pdn);
hci_free_dev(hdev);
}
+#ifdef CONFIG_PM_SLEEP
+static int nxp_serdev_suspend(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ ps_control(psdata->hdev, PS_STATE_SLEEP);
+
+ if (psdata->wakeup_source) {
+ enable_irq_wake(psdata->irq_handler);
+ enable_irq(psdata->irq_handler);
+ }
+ return 0;
+}
+
+static int nxp_serdev_resume(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (psdata->wakeup_source) {
+ disable_irq(psdata->irq_handler);
+ disable_irq_wake(psdata->irq_handler);
+ }
+
+ ps_control(psdata->hdev, PS_STATE_AWAKE);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DEV_COREDUMP
+static void nxp_serdev_coredump(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = nxpdev->hdev;
+
+ if (hdev->dump.coredump)
+ hdev->dump.coredump(hdev);
+}
+#endif
+
static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
+ .fw_name_old = FIRMWARE_W8987_OLD,
};
static struct btnxpuart_data w8997_data __maybe_unused = {
.helper_fw_name = FIRMWARE_HELPER,
.fw_name = FIRMWARE_W8997,
+ .fw_name_old = FIRMWARE_W8997_OLD,
};
static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
@@ -1416,12 +2007,20 @@ static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
};
MODULE_DEVICE_TABLE(of, nxpuart_of_match_table);
+static const struct dev_pm_ops nxp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(nxp_serdev_suspend, nxp_serdev_resume)
+};
+
static struct serdev_device_driver nxp_serdev_driver = {
.probe = nxp_serdev_probe,
.remove = nxp_serdev_remove,
.driver = {
.name = "btnxpuart",
.of_match_table = of_match_ptr(nxpuart_of_match_table),
+ .pm = &nxp_pm_ops,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump = nxp_serdev_coredump,
+#endif
},
};
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index dfbbac92242a..7c958d6065be 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+static bool qca_filename_has_extension(const char *filename)
+{
+ const char *suffix = strrchr(filename, '.');
+
+ /* File extensions require a dot, but not as the first or last character */
+ if (!suffix || suffix == filename || *(suffix + 1) == '\0')
+ return false;
+
+ /* Avoid matching directories with names that look like files with extensions */
+ return !strchr(suffix, '/');
+}
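A few illustrative inputs for the helper above (file names hypothetical):

	/*
	 * "hpnv21g.b8c"    -> true  (dot present, not first/last, no '/' after it)
	 * "hpnv21"         -> false (no dot)
	 * ".config"        -> false (dot is the first character)
	 * "fw."            -> false (dot is the last character)
	 * "mydir.d/hpnv21" -> false (the last dot belongs to a directory name)
	 */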
+
+static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
+{
+ char fwname[64];
+ const char *suffix;
+
+ /* nvm file name has an extension, replace with .bin */
+ if (qca_filename_has_extension(filename)) {
+ suffix = strrchr(filename, '.');
+ strscpy(fwname, filename, suffix - filename + 1);
+ snprintf(fwname + (suffix - filename),
+ sizeof(fwname) - (suffix - filename), ".bin");
+ /* If nvm file is already the default one, return false to skip the retry. */
+ if (strcmp(fwname, filename) == 0)
+ return false;
+
+ snprintf(filename, max_size, "%s", fwname);
+ return true;
+ }
+ return false;
+}
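Worked example for the fallback above (names hypothetical):

	/*
	 * "hpnv21g.b8c" -> rewritten to "hpnv21g.bin", returns true
	 * "hpnv21g.bin" -> unchanged, returns false (already the default file)
	 * "hpnv21g"     -> unchanged, returns false (no extension to replace)
	 */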
+
static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
u8 *fw_data, size_t fw_size,
@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
config->fwname, ret);
return ret;
}
+ }
+ /* If the board-specific file is missing, try loading the default
+ * one, unless that was attempted already.
+ */
+ else if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
} else {
bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret);
@@ -693,48 +739,53 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bacmp(&bda->bdaddr, &config->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
return 0;
}
-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
+ const char *stem, enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
const char *variant;
+ const char *prefix;
- /* hsp gf chip */
- if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
- variant = "g";
- else
- variant = "";
+ /* Set default values for variant and prefix */
+ variant = "";
+ prefix = "b";
- if (bid == 0x0)
- snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
- else
- snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
-}
+ if (soc_type == QCA_QCA2066)
+ prefix = "";
-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
- const char *stem, u8 rom_ver, u16 bid)
-{
- if (bid == 0x0)
- snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
- else if (bid & 0xff00)
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%x", stem, rom_ver, bid);
- else
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%02x", stem, rom_ver, bid);
+ if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
+ /* If the chip is manufactured by GlobalFoundries */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ }
+
+ if (rom_ver != 0) {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
+ variant, prefix, bid);
+ } else {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
+ }
}
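Names produced by the helper above for a few hypothetical inputs:

	/*
	 * QCA_WCN6855, GF silicon, "hpnv", rom_ver 0x21, bid 0x008c -> "qca/hpnv21g.b8c"
	 * QCA_WCN6855, non-GF,     "hpnv", rom_ver 0x21, bid 0x0000 -> "qca/hpnv21.bin"
	 * QCA_QCA2066, GF silicon, "hpnv", rom_ver 0x21, bid 0x008c -> "qca/hpnv21g.8c"
	 * QCA_WCN7850,             "hmtnv", rom_ver 0x23, bid 0x0001 -> "qca/hmtnv23.b01"
	 */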
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
+ const char *variant = "";
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -761,44 +812,52 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- switch (soc_type) {
- case QCA_WCN3990:
- case QCA_WCN3991:
- case QCA_WCN3998:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN3988:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/apbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA2066:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA6390:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/htbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN6750:
- /* Choose mbn file by default.If mbn file is not found
- * then choose tlv file
- */
- config.type = ELF_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/msbtfw%02x.mbn", rom_ver);
- break;
- case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtbtfw%02x.tlv", rom_ver);
- break;
- default:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/rampatch_%08x.bin", soc_ver);
+ if (rampatch_name) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
+ } else {
+ switch (soc_type) {
+ case QCA_WCN3950:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3988:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA2066:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN6750:
+ /* Choose the mbn file by default. If the mbn file is not found,
+ * then choose the tlv file
+ */
+ config.type = ELF_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.mbn", rom_ver);
+ break;
+ case QCA_WCN6855:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN7850:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hmtbtfw%02x.tlv", rom_ver);
+ break;
+ default:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
@@ -816,28 +875,42 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/%s", firmware_name);
+ /* The firmware name has an extension, use it directly */
+ if (qca_filename_has_extension(firmware_name)) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
+ } else {
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ firmware_name, soc_type, ver, 0, boardid);
+ }
} else {
switch (soc_type) {
+ case QCA_WCN3950:
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
+ variant = "t";
+ else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
+ variant = "s";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmnv%02x%s.bin", rom_ver, variant);
+ break;
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02xu.bin", rom_ver);
- } else {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02x.bin", rom_ver);
- }
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
+ variant = "u";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x%s.bin", rom_ver, variant);
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA2066:
- qca_generate_hsp_nvm_name(config.fwname,
- sizeof(config.fwname), ver, rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname,
+ sizeof(config.fwname), "hpnv", soc_type, ver,
+ rom_ver, boardid);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
@@ -848,13 +921,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpnv%02x.bin", rom_ver);
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
- qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hmtnv", soc_type, ver, rom_ver, boardid);
break;
-
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
@@ -886,6 +960,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
* VsMsftOpCode.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index bb5207d7a8c7..8f3c1b1c77b3 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,6 +41,9 @@
#define QCA_WCN3991_SOC_ID 0x40014320
+#define QCA_WCN3950_SOC_ID_T 0x40074130
+#define QCA_WCN3950_SOC_ID_S 0x40075130
+
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
@@ -145,6 +148,7 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
+ QCA_WCN3950,
QCA_WCN3988,
QCA_WCN3990,
QCA_WCN3998,
@@ -161,7 +165,7 @@ enum qca_btsoc_type {
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name);
+ const char *firmware_name, const char *rampatch_name);
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
@@ -176,7 +180,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name,
+ const char *rampatch_name)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 88dbb2f3fabf..d2e13fcb6bab 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -117,7 +117,7 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
/* Devices do not have persistent storage for BD address. Retrieve
* it from the firmware node property.
*/
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
return 0;
}
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);
static struct platform_driver btqcomsmd_driver = {
.probe = btqcomsmd_probe,
- .remove_new = btqcomsmd_remove,
+ .remove = btqcomsmd_remove,
.driver = {
.name = "btqcomsmd",
.of_match_table = btqcomsmd_of_match,
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 0c91d7635ac3..6c1f584c8a33 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/rsi_91x.h>
#define RSI_DMA_ALIGN 8
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 4f1e37b4f780..5603b282f9bc 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -7,7 +7,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
@@ -30,6 +30,7 @@
#define RTL_ROM_LMP_8822B 0x8822
#define RTL_ROM_LMP_8852A 0x8852
#define RTL_ROM_LMP_8851B 0x8851
+#define RTL_ROM_LMP_8922A 0x8922
#define RTL_CONFIG_MAGIC 0x8723ab55
#define RTL_VSC_OP_COREDUMP 0xfcff
@@ -49,7 +50,7 @@
#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}})
#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}})
-#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}})
+#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0xAD, 0x00, 0xb0}})
#define RTL_PATCH_SNIPPETS 0x01
#define RTL_PATCH_DUMMY_HEADER 0x02
@@ -69,7 +70,9 @@ enum btrtl_chip_id {
CHIP_ID_8852B = 20,
CHIP_ID_8852C = 25,
CHIP_ID_8851B = 36,
+ CHIP_ID_8922A = 44,
CHIP_ID_8852BT = 47,
+ CHIP_ID_8761C = 51,
};
struct id_table {
@@ -228,6 +231,14 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8761bu_config",
.hw_info = "rtl8761bu" },
+ /* 8761CU */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0x0e, 0, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761cu_fw",
+ .cfg_name = "rtl_bt/rtl8761cu_config",
+ .hw_info = "rtl8761cu" },
+
/* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART),
.config_needed = true,
@@ -309,6 +320,15 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8851bu_config",
.hw_info = "rtl8851bu" },
+ /* 8922A */
+ { IC_INFO(RTL_ROM_LMP_8922A, 0xa, 0xc, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8922au_fw",
+ .cfg_name = "rtl_bt/rtl8922au_config",
+ .hw_info = "rtl8922au" },
+
/* 8852BT/8852BE-VT */
{ IC_INFO(RTL_ROM_LMP_8852A, 0x87, 0xc, HCI_USB),
.config_needed = false,
@@ -333,7 +353,8 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
(ic_id_table[i].hci_rev != hci_rev))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
- (ic_id_table[i].hci_ver != hci_ver))
+ (ic_id_table[i].hci_ver != hci_ver) &&
+ (ic_id_table[i].hci_ver != 0))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
(ic_id_table[i].hci_bus != hci_bus))
@@ -523,7 +544,6 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
{
struct rtl_epatch_header_v2 *hdr;
int rc;
- u8 reg_val[2];
u8 key_id;
u32 num_sections;
struct rtl_section *section;
@@ -538,14 +558,7 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
.len = btrtl_dev->fw_len - 7, /* Cut the tail */
};
- rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
- if (rc < 0)
- return -EIO;
- key_id = reg_val[0];
-
- rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id);
-
- btrtl_dev->key_id = key_id;
+ key_id = btrtl_dev->key_id;
hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
if (!hdr)
@@ -614,8 +627,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
- if (!len)
+ if (!len) {
+ kvfree(ptr);
return -EPERM;
+ }
*_buf = ptr;
return len;
@@ -655,7 +670,9 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8852A, 20 }, /* 8852B */
{ RTL_ROM_LMP_8852A, 25 }, /* 8852C */
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
+ { RTL_ROM_LMP_8922A, 44 }, /* 8922A */
{ RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
+ { RTL_ROM_LMP_8761A, 51 }, /* 8761C */
};
if (btrtl_dev->fw_len <= 8)
@@ -681,7 +698,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Loop from the end of the firmware parsing instructions, until
* we find an instruction that identifies the "project ID" for the
- * hardware supported by this firwmare file.
+ * hardware supported by this firmware file.
* Once we have that, we double-check that project_id is suitable
* for the hardware we are working with.
*/
@@ -811,7 +828,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
struct sk_buff *skb;
struct hci_rp_read_local_version *rp;
- dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+ dl_cmd = kmalloc(sizeof(*dl_cmd), GFP_KERNEL);
if (!dl_cmd)
return -ENOMEM;
@@ -878,10 +895,8 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
if (ret < 0)
return ret;
ret = fw->size;
- *buff = kvmalloc(fw->size, GFP_KERNEL);
- if (*buff)
- memcpy(*buff, fw->data, ret);
- else
+ *buff = kvmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (!*buff)
ret = -ENOMEM;
release_firmware(fw);
@@ -1058,6 +1073,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
u16 hci_rev, lmp_subver;
u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
+ int rc;
+ u8 key_id;
u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
@@ -1168,6 +1185,14 @@ next:
goto err_free;
}
+ rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
+ if (rc < 0)
+ goto err_free;
+
+ key_id = reg_val[0];
+ btrtl_dev->key_id = key_id;
+ rtl_dev_info(hdev, "%s: key id %u", __func__, key_id);
+
btrtl_dev->fw_len = -EIO;
if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
@@ -1190,7 +1215,7 @@ next:
goto err_free;
}
- if (btrtl_dev->ic_info->cfg_name) {
+ if (btrtl_dev->ic_info->cfg_name && !btrtl_dev->key_id) {
if (postfix) {
snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
btrtl_dev->ic_info->cfg_name, postfix);
@@ -1205,6 +1230,8 @@ next:
rtl_dev_err(hdev, "mandatory config file %s not found",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
+ if (!ret)
+ ret = -EINVAL;
goto err_free;
}
}
@@ -1255,6 +1282,7 @@ int btrtl_download_firmware(struct hci_dev *hdev,
case RTL_ROM_LMP_8852A:
case RTL_ROM_LMP_8703B:
case RTL_ROM_LMP_8851B:
+ case RTL_ROM_LMP_8922A:
err = btrtl_setup_rtl8723b(hdev, btrtl_dev);
break;
default:
@@ -1274,7 +1302,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Enable central-peripheral role (able to create new connections with
* an existing connection in slave role).
@@ -1286,9 +1314,10 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852B:
case CHIP_ID_8852C:
case CHIP_ID_8851B:
+ case CHIP_ID_8922A:
case CHIP_ID_8852BT:
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ case CHIP_ID_8761C:
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* RTL8852C needs to transmit mSBC data continuously without
* the zero length of USB packets for the ALT 6 supported chips
@@ -1297,8 +1326,10 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP);
if (btrtl_dev->project_id == CHIP_ID_8852A ||
+ btrtl_dev->project_id == CHIP_ID_8852B ||
btrtl_dev->project_id == CHIP_ID_8852C)
- set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER);
hci_set_aosp_capable(hdev);
break;
@@ -1317,8 +1348,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
* but it doesn't support any features from page 2 -
* it either responds with garbage or with error status
*/
- set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
- &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2);
break;
default:
break;
@@ -1339,12 +1369,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
btrtl_set_quirks(hdev, btrtl_dev);
- hci_set_hw_info(hdev,
+ if (btrtl_dev->ic_info) {
+ hci_set_hw_info(hdev,
"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
btrtl_dev->ic_info->lmp_subver,
btrtl_dev->ic_info->hci_rev,
btrtl_dev->ic_info->hci_ver,
btrtl_dev->ic_info->hci_bus);
+ }
btrtl_free(btrtl_dev);
return ret;
@@ -1359,7 +1391,7 @@ int btrtl_shutdown_realtek(struct hci_dev *hdev)
/* According to the vendor driver, BT must be reset on close to avoid
* firmware crash.
*/
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "HCI reset during shutdown failed");
@@ -1504,6 +1536,8 @@ MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761cu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761cu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin");
@@ -1529,3 +1563,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852btu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8922au_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8922au_config.bin");
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index fdcfe9c50313..8325655ce6aa 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -295,6 +295,7 @@ static int btsdio_probe(struct sdio_func *func,
case SDIO_DEVICE_ID_BROADCOM_4345:
case SDIO_DEVICE_ID_BROADCOM_43455:
case SDIO_DEVICE_ID_BROADCOM_4356:
+ case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
return -ENODEV;
}
}
@@ -326,7 +327,7 @@ static int btsdio_probe(struct sdio_func *func,
hdev->send = btsdio_send_frame;
if (func->vendor == 0x0104 && func->device == 0x00c5)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e384ef6ff050..8ed3883ab8ee 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -17,10 +17,11 @@
#include <linux/suspend.h>
#include <linux/gpio/consumer.h>
#include <linux/debugfs.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btbcm.h"
@@ -59,12 +60,13 @@ static struct usb_driver btusb_driver;
#define BTUSB_CW6622 BIT(19)
#define BTUSB_MEDIATEK BIT(20)
#define BTUSB_WIDEBAND_SPEECH BIT(21)
-#define BTUSB_VALID_LE_STATES BIT(22)
+#define BTUSB_INVALID_LE_STATES BIT(22)
#define BTUSB_QCA_WCN6855 BIT(23)
#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24)
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
#define BTUSB_ACTIONS_SEMI BIT(27)
+#define BTUSB_BARROT BIT(28)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -297,116 +299,118 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* QCA WCN6855 chipset */
- { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x28de, 0x1401), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* QCA WCN785x chipset */
{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0fc), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -479,6 +483,7 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0039), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
BTUSB_INTEL_NO_WBS_SUPPORT |
@@ -499,6 +504,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8821CE Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3533), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
@@ -510,6 +517,17 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3601), .driver_info = BTUSB_REALTEK },
+
+ /* Realtek 8851BU Bluetooth devices */
+ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -539,6 +557,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe122), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
@@ -555,10 +575,35 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852BT/8852BE-VT Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe12f), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3619), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
+ /* Realtek 8922AE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x8922), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3617), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3616), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe130), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -566,131 +611,152 @@ static const struct usb_device_id quirks_table[] = {
/* MediaTek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7615E Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
/* Additional MediaTek MT7663 Bluetooth devices */
{ USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7668 Bluetooth devices */
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+
+ /* Additional MediaTek MT7920 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe135), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3576), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922 Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe170), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3633), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14e), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3604), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3627), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x7009), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -727,6 +793,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2b89, 0x6275), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -763,6 +831,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Barrot Technology Bluetooth devices */
+ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT },
+ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT },
+
/* Actions Semiconductor ATS2851 based devices */
{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
@@ -891,8 +963,11 @@ struct btusb_data {
int (*setup_on_usb)(struct hci_dev *hdev);
+ int (*suspend)(struct hci_dev *hdev);
+ int (*resume)(struct hci_dev *hdev);
+ int (*disconnect)(struct hci_dev *hdev);
+
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
- unsigned cmd_timeout_cnt;
struct qca_dump_info qca_dump;
};
@@ -902,11 +977,6 @@ static void btusb_reset(struct hci_dev *hdev)
struct btusb_data *data;
int err;
- if (hdev->reset) {
- hdev->reset(hdev);
- return;
- }
-
data = hci_get_drvdata(hdev);
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -919,15 +989,12 @@ static void btusb_reset(struct hci_dev *hdev)
usb_queue_reset_device(data->intf);
}
-static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
+static void btusb_intel_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
struct btintel_data *intel_data = hci_get_priv(hdev);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (intel_data->acpi_reset_method) {
if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again");
@@ -1000,7 +1067,7 @@ static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
}
}
-static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
+static void btusb_rtl_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
@@ -1010,9 +1077,6 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (!reset_gpio) {
btusb_reset(hdev);
return;
@@ -1047,19 +1111,16 @@ static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
}
-static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+static void btusb_qca_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
- bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
+ bt_dev_info(hdev, "Ramdump in progress, defer reset");
return;
}
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (reset_gpio) {
bt_dev_err(hdev, "Reset qca device via bt_en gpio");
@@ -1084,6 +1145,24 @@ static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
btusb_reset(hdev);
}
+static u8 btusb_classify_qca_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Some Qualcomm controllers, e.g., QCNFA765 with WCN6855 chip, send debug
+ * packets as ACL frames with connection handle 0x2EDC. These are not real
+ * ACL packets and should be reclassified as HCI_DIAG_PKT to prevent
+ * "ACL packet for unknown connection handle 3804" errors.
+ */
+ if (skb->len >= 2) {
+ u16 handle = get_unaligned_le16(skb->data);
+
+ if (handle == 0x2EDC)
+ return HCI_DIAG_PKT;
+ }
+
+ /* Use default packet type for other packets */
+ return hci_skb_pkt_type(skb);
+}
+
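For context on the comment above: the 12-bit ACL connection-handle field explains the two numbers. A minimal sketch of the masking (mirroring the hci_handle() helper in include/net/bluetooth/hci.h; the function name here is illustrative):

	static u16 acl_handle_sketch(u16 raw)
	{
		/* The low 12 bits carry the connection handle; the upper
		 * bits are packet-boundary/broadcast flags, so
		 * 0x2EDC & 0x0fff == 0x0edc == 3804 decimal, which is the
		 * handle that shows up in the error message.
		 */
		return raw & 0x0fff;
	}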
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@@ -1105,7 +1184,7 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
- /* Trigger dequeue immediatelly if an event is received */
+ /* Trigger dequeue immediately if an event is received */
schedule_delayed_work(&data->rx_work, 0);
}
@@ -1156,6 +1235,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
if (!hci_skb_expect(skb)) {
+			/* Each chunk should correspond to one or more
+			 * complete events, so if there are bytes left that
+			 * don't constitute a new event, this is likely a
+			 * bug in the controller.
+ */
+ if (count && count < HCI_EVENT_HDR_SIZE) {
+ bt_dev_warn(data->hdev,
+ "Unexpected continuation: %d bytes",
+ count);
+ count = 0;
+ }
+
/* Complete frame */
btusb_recv_event(data, skb);
skb = NULL;
@@ -1389,7 +1480,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
- size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+ if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
+ le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
+		/* Fake CSR devices don't seem to support short transfers */
+ size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+ else
+ /* Use maximum HCI Event size so the USB stack handles
+ * ZPL/short-transfer automatically.
+ */
+ size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {
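A sketch of the sizing decision above (hypothetical helper, not part of the patch): well-behaved controllers terminate each event with a ZLP or short packet, so an oversized buffer is safe, while the fake 0a12:0001 CSR clones only work when every event fills the wMaxPacketSize buffer exactly.

	static int intr_buf_size(struct btusb_data *data)
	{
		/* Fake CSR clones: keep the old wMaxPacketSize buffer */
		if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
		    le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
			return le16_to_cpu(data->intr_ep->wMaxPacketSize);

		/* Big enough for any HCI event; the USB core completes
		 * the URB on the ZLP/short packet.
		 */
		return HCI_MAX_EVENT_SIZE;
	}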
@@ -2111,7 +2210,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2440,16 +2540,18 @@ static int btusb_setup_csr(struct hci_dev *hdev)
* Probably will need to be expanded in the future;
* without these the controller will lock up.
*/
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_VOICE_SETTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
*/
- clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
- clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_clear_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
+ hci_clear_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/*
* Special workaround for these BT 4.0 chip clones, and potentially more:
@@ -2560,12 +2662,12 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
else
urb = alloc_ctrl_urb(hdev, skb);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hdev, opcode);
} else {
urb = alloc_ctrl_urb(hdev, skb);
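Because the boot command never generates a completion from the controller, the driver synthesizes one to keep hci_core's command flow control moving. This is roughly what btusb's inject_cmd_complete() does (treat the exact body as an approximation):

	static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
	{
		struct sk_buff *skb;
		struct hci_event_hdr *hdr;
		struct hci_ev_cmd_complete *evt;

		skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		hdr = skb_put(skb, sizeof(*hdr));
		hdr->evt = HCI_EV_CMD_COMPLETE;
		hdr->plen = sizeof(*evt) + 1;

		evt = skb_put(skb, sizeof(*evt));
		evt->ncmd = 0x01;
		evt->opcode = cpu_to_le16(opcode);

		skb_put_u8(skb, 0x00);	/* status: success */

		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

		return hci_recv_frame(hdev, skb);
	}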
@@ -2585,7 +2687,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2638,410 +2741,91 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
-/* UHW CR mapping */
-#define MTK_BT_MISC 0x70002510
-#define MTK_BT_SUBSYS_RST 0x70002610
-#define MTK_UDMA_INT_STA_BT 0x74000024
-#define MTK_UDMA_INT_STA_BT1 0x74000308
-#define MTK_BT_WDT_STATUS 0x740003A0
-#define MTK_EP_RST_OPT 0x74011890
-#define MTK_EP_RST_IN_OUT_OPT 0x00010001
-#define MTK_BT_RST_DONE 0x00000100
-#define MTK_BT_RESET_REG_CONNV3 0x70028610
-#define MTK_BT_READ_DEV_ID 0x70010200
-
-
-static void btusb_mtk_wmt_recv(struct urb *urb)
+static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
- struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct sk_buff *skb;
+ struct btmtk_data *btmtk_data;
int err;
- if (urb->status == 0 && urb->actual_length > 0) {
- hdev->stat.byte_rx += urb->actual_length;
-
- /* WMT event shouldn't be fragmented and the size should be
- * less than HCI_WMT_MAX_EVENT_SIZE.
- */
- skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
- if (!skb) {
- hdev->stat.err_rx++;
- kfree(urb->setup_packet);
- return;
- }
-
- hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
- skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
-
- /* When someone waits for the WMT event, the skb is being cloned
- * and being processed the events from there then.
- */
- if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
- data->evt_skb = skb_clone(skb, GFP_ATOMIC);
- if (!data->evt_skb) {
- kfree_skb(skb);
- kfree(urb->setup_packet);
- return;
- }
- }
-
- err = hci_recv_frame(hdev, skb);
- if (err < 0) {
- kfree_skb(data->evt_skb);
- data->evt_skb = NULL;
- kfree(urb->setup_packet);
- return;
- }
-
- if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
- &data->flags)) {
- /* Barrier to sync with other CPUs */
- smp_mb__after_atomic();
- wake_up_bit(&data->flags,
- BTUSB_TX_WAIT_VND_EVT);
- }
- kfree(urb->setup_packet);
+ if (!data->hdev)
return;
- } else if (urb->status == -ENOENT) {
- /* Avoid suspend failed when usb_kill_urb */
- return;
- }
- usb_mark_last_busy(data->udev);
-
- /* The URB complete handler is still called with urb->actual_length = 0
- * when the event is not available, so we should keep re-submitting
- * URB until WMT event returns, Also, It's necessary to wait some time
- * between the two consecutive control URBs to relax the target device
- * to generate the event. Otherwise, the WMT event cannot return from
- * the device successfully.
- */
- udelay(500);
-
- usb_anchor_urb(urb, &data->ctrl_anchor);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- kfree(urb->setup_packet);
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected
- */
- if (err != -EPERM && err != -ENODEV)
- bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
- urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
-{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct usb_ctrlrequest *dr;
- unsigned char *buf;
- int err, size = 64;
- unsigned int pipe;
- struct urb *urb;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
-
- dr = kmalloc(sizeof(*dr), GFP_KERNEL);
- if (!dr) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
- dr->bRequest = 1;
- dr->wIndex = cpu_to_le16(0);
- dr->wValue = cpu_to_le16(48);
- dr->wLength = cpu_to_le16(size);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- kfree(dr);
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvctrlpipe(data->udev, 0);
-
- usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
- buf, size, btusb_mtk_wmt_recv, hdev);
-
- urb->transfer_flags |= URB_FREE_BUFFER;
-
- usb_anchor_urb(urb, &data->ctrl_anchor);
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- bt_dev_err(hdev, "urb %p submission failed (%d)",
- urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-}
-
-static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
- struct btmtk_hci_wmt_params *wmt_params)
-{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
- u32 hlen, status = BTMTK_WMT_INVALID;
- struct btmtk_hci_wmt_evt *wmt_evt;
- struct btmtk_hci_wmt_cmd *wc;
- struct btmtk_wmt_hdr *hdr;
- int err;
-
- /* Send the WMT command and wait until the WMT event returns */
- hlen = sizeof(*hdr) + wmt_params->dlen;
- if (hlen > 255)
- return -EINVAL;
-
- wc = kzalloc(hlen, GFP_KERNEL);
- if (!wc)
- return -ENOMEM;
-
- hdr = &wc->hdr;
- hdr->dir = 1;
- hdr->op = wmt_params->op;
- hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
- hdr->flag = wmt_params->flag;
- memcpy(wc->data, wmt_params->data, wmt_params->dlen);
-
- set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
-
- /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling,
- * it needs constantly polling control pipe until the host received the
- * WMT event, thus, we should require to specifically acquire PM counter
- * on the USB to prevent the interface from entering auto suspended
- * while WMT cmd/event in progress.
- */
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- goto err_free_wc;
-
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
+ btmtk_data = hci_get_priv(data->hdev);
+ if (!btmtk_data)
+ return;
- if (err < 0) {
- clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- usb_autopm_put_interface(data->intf);
- goto err_free_wc;
+ if (!btmtk_data->isopkt_intf) {
+ bt_dev_err(data->hdev, "Can't claim NULL iso interface");
+ return;
}
- /* Submit control IN URB on demand to process the WMT event */
- err = btusb_mtk_submit_wmt_recv_urb(hdev);
-
- usb_autopm_put_interface(data->intf);
-
- if (err < 0)
- goto err_free_wc;
-
- /* The vendor specific WMT commands are all answered by a vendor
- * specific event and will have the Command Status or Command
- * Complete as with usual HCI command flow control.
- *
- * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT
- * state to be cleared. The driver specific event receive routine
- * will clear that state and with that indicate completion of the
- * WMT command.
+ /*
+ * The function usb_driver_claim_interface() is documented to need
+ * locks held if it's not called from a probe routine. The code here
+ * is called from the hci_power_on workqueue, so grab the lock.
*/
- err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT,
- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
- if (err == -EINTR) {
- bt_dev_err(hdev, "Execution of wmt command interrupted");
- clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- goto err_free_wc;
- }
-
- if (err) {
- bt_dev_err(hdev, "Execution of wmt command timed out");
- clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- err = -ETIMEDOUT;
- goto err_free_wc;
- }
-
- if (data->evt_skb == NULL)
- goto err_free_wc;
-
- /* Parse and handle the return WMT event */
- wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
- if (wmt_evt->whdr.op != hdr->op) {
- bt_dev_err(hdev, "Wrong op received %d expected %d",
- wmt_evt->whdr.op, hdr->op);
- err = -EIO;
- goto err_free_skb;
- }
-
- switch (wmt_evt->whdr.op) {
- case BTMTK_WMT_SEMAPHORE:
- if (wmt_evt->whdr.flag == 2)
- status = BTMTK_WMT_PATCH_UNDONE;
- else
- status = BTMTK_WMT_PATCH_DONE;
- break;
- case BTMTK_WMT_FUNC_CTRL:
- wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
- if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
- status = BTMTK_WMT_ON_DONE;
- else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
- status = BTMTK_WMT_ON_PROGRESS;
- else
- status = BTMTK_WMT_ON_UNDONE;
- break;
- case BTMTK_WMT_PATCH_DWNLD:
- if (wmt_evt->whdr.flag == 2)
- status = BTMTK_WMT_PATCH_DONE;
- else if (wmt_evt->whdr.flag == 1)
- status = BTMTK_WMT_PATCH_PROGRESS;
- else
- status = BTMTK_WMT_PATCH_UNDONE;
- break;
- }
-
- if (wmt_params->status)
- *wmt_params->status = status;
-
-err_free_skb:
- kfree_skb(data->evt_skb);
- data->evt_skb = NULL;
-err_free_wc:
- kfree(wc);
- return err;
-}
-
-static int btusb_mtk_func_query(struct hci_dev *hdev)
-{
- struct btmtk_hci_wmt_params wmt_params;
- int status, err;
- u8 param = 0;
-
- /* Query whether the function is enabled */
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 4;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = &status;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ device_lock(&btmtk_data->isopkt_intf->dev);
+ err = usb_driver_claim_interface(&btusb_driver,
+ btmtk_data->isopkt_intf, data);
+ device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
- bt_dev_err(hdev, "Failed to query function status (%d)", err);
- return err;
+ btmtk_data->isopkt_intf = NULL;
+ bt_dev_err(data->hdev, "Failed to claim iso interface: %d", err);
+ return;
}
- return status;
+ set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
+ init_usb_anchor(&btmtk_data->isopkt_anchor);
}
-static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val)
+static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
- struct hci_dev *hdev = data->hdev;
- int pipe, err;
- void *buf;
+ struct btmtk_data *btmtk_data;
- buf = kzalloc(4, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- put_unaligned_le32(val, buf);
-
- pipe = usb_sndctrlpipe(data->udev, 0);
- err = usb_control_msg(data->udev, pipe, 0x02,
- 0x5E,
- reg >> 16, reg & 0xffff,
- buf, 4, USB_CTRL_SET_TIMEOUT);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to write uhw reg(%d)", err);
- goto err_free_buf;
- }
-
-err_free_buf:
- kfree(buf);
+ if (!hdev)
+ return;
- return err;
-}
+ btmtk_data = hci_get_priv(hdev);
+ if (!btmtk_data)
+ return;
-static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val)
-{
- struct hci_dev *hdev = data->hdev;
- int pipe, err;
- void *buf;
+ if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+ clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
- buf = kzalloc(4, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (btmtk_data->isopkt_skb) {
+ dev_kfree_skb_irq(btmtk_data->isopkt_skb);
+ btmtk_data->isopkt_skb = NULL;
+ }
- pipe = usb_rcvctrlpipe(data->udev, 0);
- err = usb_control_msg(data->udev, pipe, 0x01,
- 0xDE,
- reg >> 16, reg & 0xffff,
- buf, 4, USB_CTRL_GET_TIMEOUT);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to read uhw reg(%d)", err);
- goto err_free_buf;
+ if (btmtk_data->isopkt_intf) {
+ usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
+ usb_driver_release_interface(&btusb_driver,
+ btmtk_data->isopkt_intf);
+ btmtk_data->isopkt_intf = NULL;
+ }
}
- *val = get_unaligned_le32(buf);
- bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val);
-
-err_free_buf:
- kfree(buf);
-
- return err;
+ clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
-static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val)
+static int btusb_mtk_disconnect(struct hci_dev *hdev)
{
- int pipe, err, size = sizeof(u32);
- void *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pipe = usb_rcvctrlpipe(data->udev, 0);
- err = usb_control_msg(data->udev, pipe, 0x63,
- USB_TYPE_VENDOR | USB_DIR_IN,
- reg >> 16, reg & 0xffff,
- buf, size, USB_CTRL_GET_TIMEOUT);
- if (err < 0)
- goto err_free_buf;
-
- *val = get_unaligned_le32(buf);
-
-err_free_buf:
- kfree(buf);
-
- return err;
-}
-
-static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
-{
- return btusb_mtk_reg_read(data, reg, id);
-}
-
-static u32 btusb_mtk_reset_done(struct hci_dev *hdev)
-{
- struct btusb_data *data = hci_get_drvdata(hdev);
- u32 val = 0;
-
- btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val);
+ /* This function describes the specific additional steps taken by MediaTek
+	 * when the Bluetooth USB driver's disconnect function is called.
+ */
+ btusb_mtk_release_iso_intf(hdev);
- return val & MTK_BT_RST_DONE;
+ return 0;
}
static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmediatek_data *mediatek;
- u32 val;
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
int err;
/* It's MediaTek specific bluetooth reset mechanism via USB */
- if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ if (test_and_set_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags)) {
bt_dev_err(hdev, "last reset failed? Not resetting again");
return -EBUSY;
}
@@ -3050,302 +2834,83 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
if (err < 0)
return err;
+ /* Release MediaTek ISO data interface */
+ btusb_mtk_release_iso_intf(hdev);
+
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
- mediatek = hci_get_priv(hdev);
-
- if (mediatek->dev_id == 0x7925) {
- btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
- val |= (1 << 5);
- btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
- btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
- val &= 0xFFFF00FF;
- val |= (1 << 13);
- btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001);
- btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
- val |= (1 << 0);
- btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
- msleep(100);
- } else {
- /* It's Device EndPoint Reset Option Register */
- bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
- btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val);
-
- /* Reset the bluetooth chip via USB interface. */
- btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
- /* MT7921 need to delay 20ms between toggle reset bit */
- msleep(20);
- btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0);
- btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val);
- }
-
- err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val,
- val & MTK_BT_RST_DONE, 20000, 1000000);
- if (err < 0)
- bt_dev_err(hdev, "Reset timeout");
- btusb_mtk_id_get(data, 0x70010200, &val);
- if (!val)
- bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
+ /* Toggle the hard reset line. The MediaTek device is going to
+ * yank itself off the USB and then replug. The cleanup is handled
+ * correctly on the way out (standard USB disconnect), and the new
+ * device is detected cleanly and bound to the driver again like
+ * it should be.
+ */
+ if (data->reset_gpio) {
+ gpiod_set_value_cansleep(data->reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+ return 0;
+ }
- usb_queue_reset_device(data->intf);
+ err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id);
- clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags);
+ usb_queue_reset_device(data->intf);
+ clear_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags);
return err;
}
-static int btusb_mtk_setup(struct hci_dev *hdev)
+static int btusb_send_frame_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmtk_hci_wmt_params wmt_params;
- ktime_t calltime, delta, rettime;
- struct btmtk_tci_sleep tci_sleep;
- unsigned long long duration;
- struct sk_buff *skb;
- const char *fwname;
- int err, status;
- u32 dev_id = 0;
- char fw_bin_name[64];
- u32 fw_version = 0, fw_flavor = 0;
- u8 param;
- struct btmediatek_data *mediatek;
-
- calltime = ktime_get();
-
- err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get device id (%d)", err);
- return err;
- }
-
- if (!dev_id || dev_id != 0x7663) {
- err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get device id (%d)", err);
- return err;
- }
- err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get fw version (%d)", err);
- return err;
- }
- err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
- return err;
- }
- fw_flavor = (fw_flavor & 0x00000080) >> 7;
- }
-
- mediatek = hci_get_priv(hdev);
- mediatek->dev_id = dev_id;
- mediatek->reset_sync = btusb_mtk_reset;
-
- err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version);
- if (err < 0)
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
-
- switch (dev_id) {
- case 0x7663:
- fwname = FIRMWARE_MT7663;
- break;
- case 0x7668:
- fwname = FIRMWARE_MT7668;
- break;
- case 0x7922:
- case 0x7961:
- case 0x7925:
- if (dev_id == 0x7925)
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
- dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
- else if (dev_id == 0x7961 && fw_flavor)
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
- dev_id & 0xffff, (fw_version & 0xff) + 1);
- else
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
- dev_id & 0xffff, (fw_version & 0xff) + 1);
-
- err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
- btusb_mtk_hci_wmt_sync);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
- return err;
- }
-
- /* It's Device EndPoint Reset Option Register */
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
-
- /* Enable Bluetooth protocol */
- param = 1;
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 0;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
- return err;
- }
-
- hci_set_msft_opcode(hdev, 0xFD30);
- hci_set_aosp_capable(hdev);
- goto done;
- default:
- bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
- dev_id);
- return -ENODEV;
- }
-
- /* Query whether the firmware is already download */
- wmt_params.op = BTMTK_WMT_SEMAPHORE;
- wmt_params.flag = 1;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = &status;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
- return err;
- }
-
- if (status == BTMTK_WMT_PATCH_DONE) {
- bt_dev_info(hdev, "firmware already downloaded");
- goto ignore_setup_fw;
- }
-
- /* Setup a firmware which the device definitely requires */
- err = btmtk_setup_firmware(hdev, fwname,
- btusb_mtk_hci_wmt_sync);
- if (err < 0)
- return err;
-
-ignore_setup_fw:
- err = readx_poll_timeout(btusb_mtk_func_query, hdev, status,
- status < 0 || status != BTMTK_WMT_ON_PROGRESS,
- 2000, 5000000);
- /* -ETIMEDOUT happens */
- if (err < 0)
- return err;
-
- /* The other errors happen in btusb_mtk_func_query */
- if (status < 0)
- return status;
-
- if (status == BTMTK_WMT_ON_DONE) {
- bt_dev_info(hdev, "function already on");
- goto ignore_func_on;
- }
-
- /* Enable Bluetooth protocol */
- param = 1;
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 0;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
+ struct urb *urb;
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
- return err;
- }
+ BT_DBG("%s", hdev->name);
-ignore_func_on:
- /* Apply the low power environment setup */
- tci_sleep.mode = 0x5;
- tci_sleep.duration = cpu_to_le16(0x640);
- tci_sleep.host_duration = cpu_to_le16(0x640);
- tci_sleep.host_wakeup_pin = 0;
- tci_sleep.time_compensation = 0;
+ if (hci_skb_pkt_type(skb) == HCI_ISODATA_PKT) {
+ urb = alloc_mtk_intr_urb(hdev, skb, btusb_tx_complete);
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
- skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
- return err;
+ return submit_or_queue_tx_urb(hdev, urb);
+ } else {
+ return btusb_send_frame(hdev, skb);
}
- kfree_skb(skb);
-
-done:
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long)ktime_to_ns(delta) >> 10;
-
- bt_dev_info(hdev, "Device setup in %llu usecs", duration);
-
- return 0;
}
-static int btusb_mtk_shutdown(struct hci_dev *hdev)
+static int btusb_mtk_setup(struct hci_dev *hdev)
{
- struct btmtk_hci_wmt_params wmt_params;
- u8 param = 0;
- int err;
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
- /* Disable the device */
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 0;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
+	/* The MediaTek WMT vendor commands require the USB resources
+	 * below to complete the handshake.
+ */
+ btmtk_data->drv_name = btusb_driver.name;
+ btmtk_data->intf = data->intf;
+ btmtk_data->udev = data->udev;
+ btmtk_data->ctrl_anchor = &data->ctrl_anchor;
+ btmtk_data->reset_sync = btusb_mtk_reset;
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
- return err;
+ /* Claim ISO data interface and endpoint */
+ if (!test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
+ btusb_mtk_claim_iso_intf(data);
}
- return 0;
+ return btmtk_usb_setup(hdev);
}
-static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
+static int btusb_mtk_shutdown(struct hci_dev *hdev)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
-
- switch (handle) {
- case 0xfc6f: /* Firmware dump from device */
- /* When the firmware hangs, the device can no longer
- * suspend and thus disable auto-suspend.
- */
- usb_disable_autosuspend(data->udev);
-
- /* We need to forward the diagnostic packet to userspace daemon
- * for backward compatibility, so we have to clone the packet
- * extraly for the in-kernel coredump support.
- */
- if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
- struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+ int ret;
- if (skb_cd)
- btmtk_process_coredump(hdev, skb_cd);
- }
+ ret = btmtk_usb_shutdown(hdev);
- fallthrough;
- case 0x05ff: /* Firmware debug logging 1 */
- case 0x05fe: /* Firmware debug logging 2 */
- return hci_recv_diag(hdev, skb);
- }
+ /* Release MediaTek iso interface after shutdown */
+ btusb_mtk_release_iso_intf(hdev);
- return hci_recv_frame(hdev, skb);
+ return ret;
}
#ifdef CONFIG_PM
@@ -3518,55 +3083,27 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- int ret = 1;
+ int ret = 0;
+ unsigned int skip = 0;
u8 pkt_type;
- u8 *sk_ptr;
- unsigned int sk_len;
u16 seqno;
u32 dump_size;
- struct hci_event_hdr *event_hdr;
- struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb);
- sk_ptr = skb->data;
- sk_len = skb->len;
+ skip = sizeof(struct hci_event_hdr);
+ if (pkt_type == HCI_ACLDATA_PKT)
+ skip += sizeof(struct hci_acl_hdr);
- if (pkt_type == HCI_ACLDATA_PKT) {
- acl_hdr = hci_acl_hdr(skb);
- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
- return 0;
- sk_ptr += HCI_ACL_HDR_SIZE;
- sk_len -= HCI_ACL_HDR_SIZE;
- event_hdr = (struct hci_event_hdr *)sk_ptr;
- } else {
- event_hdr = hci_event_hdr(skb);
- }
-
- if ((event_hdr->evt != HCI_VENDOR_PKT)
- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
- return 0;
+ skb_pull(skb, skip);
+ dump_hdr = (struct qca_dump_hdr *)skb->data;
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
-
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data))
- || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
- || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return 0;
-
- /*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3586,16 +3123,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
- sk_ptr += offsetof(struct qca_dump_hdr, data0);
- sk_len -= offsetof(struct qca_dump_hdr, data0);
+
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
- sk_ptr += offsetof(struct qca_dump_hdr, data);
- sk_len -= offsetof(struct qca_dump_hdr, data);
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}
if (!btdata->qca_dump.ram_dump_size) {
@@ -3615,7 +3151,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
- skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@@ -3640,17 +3175,74 @@ out:
return ret;
}
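A worked example of the skip computation above: struct hci_event_hdr is 2 bytes (evt, plen) and struct hci_acl_hdr is 4 bytes (handle, dlen), so the QCA dump header sits 2 bytes into an event packet and 6 bytes into an ACL-encapsulated one. As a hypothetical helper:

	static unsigned int qca_dump_hdr_offset(u8 pkt_type)
	{
		unsigned int skip = sizeof(struct hci_event_hdr);	/* 2 */

		if (pkt_type == HCI_ACLDATA_PKT)
			skip += sizeof(struct hci_acl_hdr);		/* +4 */

		return skip;	/* 2 for events, 6 for ACL dumps */
	}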
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
+ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
+ goto out;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
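Both helpers rely on the same trick: skb_clone() shares the packet data but gives the clone its own data/len pointers, so pulling headers off the clone leaves the original skb untouched for hci_recv_frame() or hci_devcd_append(). A minimal sketch of the pattern (illustrative function name):

	static bool peek_vendor_evt(struct sk_buff *skb)
	{
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
		struct hci_event_hdr *hdr;
		bool match = false;

		if (!clone)
			return false;

		/* Advances only the clone's data pointer */
		hdr = skb_pull_data(clone, sizeof(*hdr));
		if (hdr && hdr->evt == HCI_VENDOR_PKT)
			match = true;

		consume_skb(clone);
		return match;
	}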
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (acl_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (evt_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
@@ -3693,6 +3285,12 @@ struct qca_device_info {
u8 ver_offset; /* offset of version structure in rampatch */
};
+struct qca_custom_firmware {
+ u32 rom_version;
+ u16 board_id;
+ const char *subdirectory;
+};
+
static const struct qca_device_info qca_devices_table[] = {
{ 0x00000100, 20, 4, 8 }, /* Rome 1.0 */
{ 0x00000101, 20, 4, 8 }, /* Rome 1.1 */
@@ -3706,6 +3304,58 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
};
+static const struct qca_custom_firmware qca_custom_btfws[] = {
+ { 0x00130201, 0x030A, "QCA2066" },
+ { 0x00130201, 0x030B, "QCA2066" },
+ { },
+};
+
+static u16 qca_extract_board_id(const struct qca_version *ver)
+{
+ u16 flag = le16_to_cpu(ver->flag);
+ u16 board_id = 0;
+
+ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+		/* The board_id is composed of two bytes: the high byte is
+		 * taken from ver->chip_id and the low byte from
+		 * ver->platform_id. We have several platforms, and
+		 * platform IDs are continuously added.
+ * Platform ID:
+ * 0x00 is for Mobile
+ * 0x01 is for X86
+ * 0x02 is for Automotive
+ * 0x03 is for Consumer electronic
+ */
+ board_id = (ver->chip_id << 8) + ver->platform_id;
+ }
+
+ /* Treat 0xffff as an invalid board ID */
+ if (board_id == 0xffff)
+ board_id = 0;
+
+ return board_id;
+}
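As a quick sketch of the packing above (hypothetical values, following the multi-NVM branch):

	/* Hypothetical: chip_id = 0x0B, platform_id = 0x02 (Automotive) */
	u16 board_id = (0x0B << 8) + 0x02;	/* board_id == 0x0B02 */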
+
+static const char *qca_get_fw_subdirectory(const struct qca_version *ver)
+{
+ const struct qca_custom_firmware *ptr;
+ u32 rom_ver;
+ u16 board_id;
+
+ rom_ver = le32_to_cpu(ver->rom_version);
+ board_id = qca_extract_board_id(ver);
+ if (!board_id)
+ return NULL;
+
+ for (ptr = qca_custom_btfws; ptr->rom_version; ptr++) {
+ if (ptr->rom_version == rom_ver &&
+ ptr->board_id == board_id)
+ return ptr->subdirectory;
+ }
+
+ return NULL;
+}
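Given the qca_custom_btfws table above, a controller reporting ROM version 0x00130201 with board ID 0x030A or 0x030B resolves to the "QCA2066" subdirectory, so the rampatch request below becomes qca/QCA2066/rampatch_usb_00130201.bin; any other combination returns NULL and falls back to the flat qca/ directory.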
+
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
void *data, u16 size)
{
@@ -3810,15 +3460,22 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
+ const char *fw_subdir;
u32 ver_rom, ver_patch, rver_rom;
u16 rver_rom_low, rver_rom_high, rver_patch;
- char fwname[64];
+ char fwname[80];
int err;
ver_rom = le32_to_cpu(ver->rom_version);
ver_patch = le32_to_cpu(ver->patch_version);
- snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ if (fw_subdir)
+ snprintf(fwname, sizeof(fwname), "qca/%s/rampatch_usb_%08x.bin",
+ fw_subdir, ver_rom);
+ else
+ snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin",
+ ver_rom);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
@@ -3862,44 +3519,34 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
const struct qca_version *ver)
{
u32 rom_version = le32_to_cpu(ver->rom_version);
- u16 flag = le16_to_cpu(ver->flag);
+ const char *variant, *fw_subdir;
+ int len;
+ u16 board_id;
- if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- /* The board_id should be split into two bytes
- * The 1st byte is chip ID, and the 2nd byte is platform ID
- * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
- * we have several platforms, and platform IDs are continuously added
- * Platform ID:
- * 0x00 is for Mobile
- * 0x01 is for X86
- * 0x02 is for Automotive
- * 0x03 is for Consumer electronic
- */
- u16 board_id = (ver->chip_id << 8) + ver->platform_id;
- const char *variant;
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ board_id = qca_extract_board_id(ver);
- switch (le32_to_cpu(ver->ram_version)) {
- case WCN6855_2_0_RAM_VERSION_GF:
- case WCN6855_2_1_RAM_VERSION_GF:
- variant = "_gf";
- break;
- default:
- variant = "";
- break;
- }
-
- if (board_id == 0) {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
- rom_version, variant);
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
- rom_version, variant, board_id);
- }
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
- rom_version);
+ switch (le32_to_cpu(ver->ram_version)) {
+ case WCN6855_2_0_RAM_VERSION_GF:
+ case WCN6855_2_1_RAM_VERSION_GF:
+ variant = "_gf";
+ break;
+ default:
+ variant = NULL;
+ break;
}
+ if (fw_subdir)
+ len = snprintf(fwname, max_size, "qca/%s/nvm_usb_%08x",
+ fw_subdir, rom_version);
+ else
+ len = snprintf(fwname, max_size, "qca/nvm_usb_%08x",
+ rom_version);
+ if (variant)
+ len += snprintf(fwname + len, max_size - len, "%s", variant);
+ if (board_id)
+ len += snprintf(fwname + len, max_size - len, "_%04x", board_id);
+ len += snprintf(fwname + len, max_size - len, ".bin");
}
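The pieces compose left to right: base path, optional "_gf" variant, optional board ID, then ".bin". For example, a device that matched the QCA2066 subdirectory, reported a WCN6855 GF RAM version and board ID 0x030A would get qca/QCA2066/nvm_usb_00130201_gf_030a.bin, while a device with no subdirectory, variant or board ID collapses to plain qca/nvm_usb_<rom>.bin.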
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
@@ -3907,7 +3554,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
const struct qca_device_info *info)
{
const struct firmware *fw;
- char fwname[64];
+ char fwname[80];
int err;
btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
@@ -4008,7 +3655,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
/* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to
* work with the likes of HSP/HFP mSBC.
*/
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
return 0;
}
@@ -4227,12 +3874,143 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
+ .owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
.llseek = default_llseek,
};
+#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000)
+#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0
+struct btusb_hci_drv_rp_supported_altsettings {
+ __u8 num;
+ __u8 altsettings[];
+} __packed;
+
+#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001)
+#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1
+struct btusb_hci_drv_cmd_switch_altsetting {
+ __u8 altsetting;
+} __packed;
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btusb_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+
+ /* Driver specific commands */
+ { BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" },
+ { BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" },
+};
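The opcodes above are packed with the usual HCI convention, OGF in the upper 6 bits and OCF in the lower 10, so the driver-specific opcode expands roughly to:

	/* Sketch of BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING's packing */
	u16 opcode = (HCI_DRV_OGF_DRIVER_SPECIFIC << 10) | 0x0001;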
+static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btusb_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, btusb_driver.name);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btusb_hci_drv_supported_commands[i].opcode;
+ bt_dev_info(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btusb_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+
+ kfree(rp);
+ return err;
+}
+
+static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct btusb_data *drvdata = hci_get_drvdata(hdev);
+ struct btusb_hci_drv_rp_supported_altsettings *rp;
+ size_t rp_size;
+ int err;
+ u8 i;
+
+ /* There are at most 7 altsettings (0 - 6) */
+ rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ rp->num = 0;
+ if (!drvdata->isoc)
+ goto done;
+
+ for (i = 0; i <= 6; i++) {
+ if (btusb_find_altsetting(drvdata, i))
+ rp->altsettings[rp->num++] = i;
+ }
+
+done:
+ rp_size = sizeof(*rp) + rp->num;
+
+ err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+ kfree(rp);
+ return err;
+}
+
+static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct btusb_hci_drv_cmd_switch_altsetting *cmd = data;
+ u8 status;
+
+ if (cmd->altsetting > 6) {
+ status = HCI_DRV_STATUS_INVALID_PARAMETERS;
+ } else {
+ if (btusb_switch_alt_setting(hdev, cmd->altsetting))
+ status = HCI_DRV_STATUS_UNSPECIFIED_ERROR;
+ else
+ status = HCI_DRV_STATUS_SUCCESS;
+ }
+
+ return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING,
+ status);
+}
+
+static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = {
+ { btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = {
+ { btusb_hci_drv_supported_altsettings,
+ BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE },
+ { btusb_hci_drv_switch_altsetting,
+ BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE },
+};
+
+static struct hci_drv btusb_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers),
+ .common_handlers = btusb_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers),
+ .specific_handlers = btusb_hci_drv_specific_handlers,
+};
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -4347,7 +4125,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_event = btusb_recv_event_realtek;
} else if (id->driver_info & BTUSB_MEDIATEK) {
/* Allocate extra space for Mediatek device */
- priv_size += sizeof(struct btmediatek_data);
+ priv_size += sizeof(struct btmtk_data);
}
data->recv_acl = hci_recv_frame;
@@ -4372,12 +4150,13 @@ static int btusb_probe(struct usb_interface *intf,
data->reset_gpio = reset_gpio;
}
- hdev->open = btusb_open;
- hdev->close = btusb_close;
- hdev->flush = btusb_flush;
- hdev->send = btusb_send_frame;
- hdev->notify = btusb_notify;
- hdev->wakeup = btusb_wakeup;
+ hdev->open = btusb_open;
+ hdev->close = btusb_close;
+ hdev->flush = btusb_flush;
+ hdev->send = btusb_send_frame;
+ hdev->notify = btusb_notify;
+ hdev->wakeup = btusb_wakeup;
+ hdev->hci_drv = &btusb_hci_drv;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -4392,10 +4171,10 @@ static int btusb_probe(struct usb_interface *intf,
}
#endif
if (id->driver_info & BTUSB_CW6622)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM2045)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
@@ -4429,7 +4208,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Transport specific configuration */
hdev->send = btusb_send_frame_intel;
- hdev->cmd_timeout = btusb_intel_cmd_timeout;
+ hdev->reset = btusb_intel_reset;
if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
@@ -4449,36 +4228,40 @@ static int btusb_probe(struct usb_interface *intf,
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
- hdev->cmd_timeout = btmtk_reset_sync;
+ hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- data->recv_acl = btusb_recv_acl_mtk;
+ hdev->send = btusb_send_frame_mtk;
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
+ data->recv_acl = btmtk_usb_recv_acl;
+ data->suspend = btmtk_usb_suspend;
+ data->resume = btmtk_usb_resume;
+ data->disconnect = btusb_mtk_disconnect;
}
if (id->driver_info & BTUSB_SWAVE) {
- set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
hdev->manufacturer = 2;
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_ATH3012) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
}
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hdev->reset = btusb_qca_reset;
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
btusb_check_needs_reset_resume(intf);
}
@@ -4489,10 +4272,11 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = btusb_recv_acl_qca;
hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL);
data->setup_on_usb = btusb_setup_qca;
+ hdev->classify_pkt_type = btusb_classify_qca_pkt_type;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hdev->reset = btusb_qca_reset;
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -4510,7 +4294,7 @@ static int btusb_probe(struct usb_interface *intf,
btrtl_set_driver_name(hdev, btusb_driver.name);
hdev->setup = btusb_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
- hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+ hdev->reset = btusb_rtl_reset;
hdev->hw_error = btusb_rtl_hw_error;
/* Realtek devices need to set remote wakeup on auto-suspend */
@@ -4520,33 +4304,35 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_ACTIONS_SEMI) {
/* Support is advertised, but not implemented */
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_CREATE_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT);
}
if (!reset)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
- set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE);
}
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
- if (id->driver_info & BTUSB_VALID_LE_STATES)
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ if (id->driver_info & BTUSB_INVALID_LE_STATES)
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
}
if (id->driver_info & BTUSB_CSR) {
@@ -4555,10 +4341,10 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (bcdDevice < 0x117)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* This must be set first in case we disable it for fakes */
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Fake CSR devices with broken commands */
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 &&
@@ -4571,7 +4357,7 @@ static int btusb_probe(struct usb_interface *intf,
/* New sniffer firmware has crippled HCI interface */
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
@@ -4642,8 +4428,16 @@ static void btusb_disconnect(struct usb_interface *intf)
if (data->diag)
usb_set_intfdata(data->diag, NULL);
+ if (data->disconnect)
+ data->disconnect(hdev);
+
hci_unregister_dev(hdev);
+ if (data->oob_wake_irq)
+ device_init_wakeup(&data->udev->dev, false);
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
+
if (intf == data->intf) {
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
@@ -4654,17 +4448,11 @@ static void btusb_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&btusb_driver, data->diag);
usb_driver_release_interface(&btusb_driver, data->intf);
} else if (intf == data->diag) {
- usb_driver_release_interface(&btusb_driver, data->intf);
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
+ usb_driver_release_interface(&btusb_driver, data->intf);
}
- if (data->oob_wake_irq)
- device_init_wakeup(&data->udev->dev, false);
-
- if (data->reset_gpio)
- gpiod_put(data->reset_gpio);
-
hci_free_dev(hdev);
}
@@ -4675,8 +4463,10 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
BT_DBG("intf %p", intf);
- /* Don't suspend if there are connections */
- if (hci_conn_count(data->hdev))
+ /* Don't auto-suspend if there are connections; external suspend calls
+ * shall never fail.
+ */
+ if (PMSG_IS_AUTO(message) && hci_conn_count(data->hdev))
return -EBUSY;
if (data->suspend_count++)
@@ -4694,6 +4484,9 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
cancel_work_sync(&data->work);
+ if (data->suspend)
+ data->suspend(data->hdev);
+
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@@ -4797,6 +4590,9 @@ static int btusb_resume(struct usb_interface *intf)
btusb_submit_isoc_urb(hdev, GFP_NOIO);
}
+ if (data->resume)
+ data->resume(hdev);
+
spin_lock_irq(&data->txlock);
play_deferred(data);
clear_bit(BTUSB_SUSPENDING, &data->flags);
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
deleted file mode 100644
index 4f2c89742245..000000000000
--- a/drivers/bluetooth/h4_recv.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Generic Bluetooth HCI UART driver
- *
- * Copyright (C) 2015-2018 Intel Corporation
- */
-
-#include <asm/unaligned.h>
-
-struct h4_recv_pkt {
- u8 type; /* Packet type */
- u8 hlen; /* Header length */
- u8 loff; /* Data length offset in header */
- u8 lsize; /* Data length field size */
- u16 maxlen; /* Max overall packet length */
- int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
-};
-
-#define H4_RECV_ACL \
- .type = HCI_ACLDATA_PKT, \
- .hlen = HCI_ACL_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE \
-
-#define H4_RECV_SCO \
- .type = HCI_SCODATA_PKT, \
- .hlen = HCI_SCO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 1, \
- .maxlen = HCI_MAX_SCO_SIZE
-
-#define H4_RECV_EVENT \
- .type = HCI_EVENT_PKT, \
- .hlen = HCI_EVENT_HDR_SIZE, \
- .loff = 1, \
- .lsize = 1, \
- .maxlen = HCI_MAX_EVENT_SIZE
-
-static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
- struct sk_buff *skb,
- const unsigned char *buffer,
- int count,
- const struct h4_recv_pkt *pkts,
- int pkts_count)
-{
- /* Check for error from previous call */
- if (IS_ERR(skb))
- skb = NULL;
-
- while (count) {
- int i, len;
-
- if (!skb) {
- for (i = 0; i < pkts_count; i++) {
- if (buffer[0] != (&pkts[i])->type)
- continue;
-
- skb = bt_skb_alloc((&pkts[i])->maxlen,
- GFP_ATOMIC);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- hci_skb_pkt_type(skb) = (&pkts[i])->type;
- hci_skb_expect(skb) = (&pkts[i])->hlen;
- break;
- }
-
- /* Check for invalid packet type */
- if (!skb)
- return ERR_PTR(-EILSEQ);
-
- count -= 1;
- buffer += 1;
- }
-
- len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- skb_put_data(skb, buffer, len);
-
- count -= len;
- buffer += len;
-
- /* Check for partial packet */
- if (skb->len < hci_skb_expect(skb))
- continue;
-
- for (i = 0; i < pkts_count; i++) {
- if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
- break;
- }
-
- if (i >= pkts_count) {
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (skb->len == (&pkts[i])->hlen) {
- u16 dlen;
-
- switch ((&pkts[i])->lsize) {
- case 0:
- /* No variable data length */
- dlen = 0;
- break;
- case 1:
- /* Single octet variable length */
- dlen = skb->data[(&pkts[i])->loff];
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- case 2:
- /* Double octet variable length */
- dlen = get_unaligned_le16(skb->data +
- (&pkts[i])->loff);
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- default:
- /* Unsupported variable length */
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (!dlen) {
- /* No more data, complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- } else {
- /* Complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- }
-
- return skb;
-}
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 2d40302409ff..94588676510f 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
new file mode 100644
index 000000000000..b1f32c5a8a3f
--- /dev/null
+++ b/drivers/bluetooth/hci_aml.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#include "hci_uart.h"
+
+#define AML_EVT_HEAD_SIZE 4
+#define AML_BDADDR_DEFAULT (&(bdaddr_t) {{ 0x00, 0xff, 0x00, 0x22, 0x2d, 0xae }})
+
+#define AML_FIRMWARE_OPERATION_SIZE (248)
+#define AML_FIRMWARE_MAX_SIZE (512 * 1024)
+
+/* TCI command */
+#define AML_TCI_CMD_READ 0xFEF0
+#define AML_TCI_CMD_WRITE 0xFEF1
+#define AML_TCI_CMD_UPDATE_BAUDRATE 0xFEF2
+#define AML_TCI_CMD_HARDWARE_RESET 0xFEF2
+#define AML_TCI_CMD_DOWNLOAD_BT_FW 0xFEF3
+
+/* Vendor command */
+#define AML_BT_HCI_VENDOR_CMD 0xFC1A
+
+/* TCI operation parameter in controller chip */
+#define AML_OP_UART_MODE 0x00A30128
+#define AML_OP_EVT_ENABLE 0x00A70014
+#define AML_OP_MEM_HARD_TRANS_EN 0x00A7000C
+#define AML_OP_RF_CFG 0x00F03040
+#define AML_OP_RAM_POWER_CTR 0x00F03050
+#define AML_OP_HARDWARE_RST 0x00F03058
+#define AML_OP_ICCM_RAM_BASE 0x00000000
+#define AML_OP_DCCM_RAM_BASE 0x00D00000
+
+/* UART configuration */
+#define AML_UART_XMIT_EN BIT(12)
+#define AML_UART_RECV_EN BIT(13)
+#define AML_UART_TIMEOUT_INT_EN BIT(14)
+#define AML_UART_CLK_SOURCE 40000000
+
+/* Controller event */
+#define AML_EVT_EN BIT(24)
+
+/* RAM power control */
+#define AML_RAM_POWER_ON (0)
+#define AML_RAM_POWER_OFF (1)
+
+/* RF configuration */
+#define AML_RF_ANT_SINGLE BIT(28)
+#define AML_RF_ANT_DOUBLE BIT(29)
+
+/* Memory transaction */
+#define AML_MM_CTR_HARD_TRAS_EN BIT(27)
+
+/* Controller reset */
+#define AML_CTR_CPU_RESET BIT(8)
+#define AML_CTR_MAC_RESET BIT(9)
+#define AML_CTR_PHY_RESET BIT(10)
+
+enum {
+ FW_ICCM,
+ FW_DCCM
+};
+
+struct aml_fw_len {
+ u32 iccm_len;
+ u32 dccm_len;
+};
+
+struct aml_tci_rsp {
+ u8 num_cmd_packet;
+ u16 opcode;
+ u8 status;
+} __packed;
+
+struct aml_device_data {
+ int iccm_offset;
+ int dccm_offset;
+ bool is_coex;
+};
+
+struct aml_serdev {
+ struct hci_uart serdev_hu;
+ struct device *dev;
+ struct gpio_desc *bt_en_gpio;
+ struct regulator *bt_supply;
+ struct clk *lpo_clk;
+ const struct aml_device_data *aml_dev_data;
+ const char *firmware_name;
+};
+
+struct aml_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+};
+
+static const struct h4_recv_pkt aml_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
+};
+
+/* The TCI command is a vendor-private command used for setting the baud
+ * rate, downloading firmware and initializing RAM.
+ *
+ * op_code | op_len             | op_addr | parameter  |
+ * --------|--------------------|---------|------------|
+ * 2B      | 1B len(addr+param) | 4B      | len(param) |
+ */
+static int aml_send_tci_cmd(struct hci_dev *hdev, u16 op_code, u32 op_addr,
+ u32 *param, u32 param_len)
+{
+ struct aml_tci_rsp *rsp = NULL;
+ struct sk_buff *skb = NULL;
+ size_t buf_len = 0;
+ u8 *buf = NULL;
+ int err = 0;
+
+ buf_len = sizeof(op_addr) + param_len;
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, &op_addr, sizeof(op_addr));
+ if (param && param_len > 0)
+ memcpy(buf + sizeof(op_addr), param, param_len);
+
+ skb = __hci_cmd_sync_ev(hdev, op_code, buf_len, buf,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to send TCI cmd (error: %d)", err);
+ goto exit;
+ }
+
+ rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp));
+ if (!rsp)
+ goto skb_free;
+
+ if (rsp->opcode != op_code || rsp->status != 0x00) {
+ bt_dev_err(hdev, "send TCI cmd (0x%04X), response (0x%04X):(%d)",
+ op_code, rsp->opcode, rsp->status);
+ err = -EINVAL;
+ goto skb_free;
+ }
+
+skb_free:
+ kfree_skb(skb);
+
+exit:
+ kfree(buf);
+ return err;
+}
+
+static int aml_update_chip_baudrate(struct hci_dev *hdev, u32 baud)
+{
+ u32 value;
+
+ value = ((AML_UART_CLK_SOURCE / baud) - 1) & 0x0FFF;
+ value |= AML_UART_XMIT_EN | AML_UART_RECV_EN | AML_UART_TIMEOUT_INT_EN;
+
+ return aml_send_tci_cmd(hdev, AML_TCI_CMD_UPDATE_BAUDRATE,
+ AML_OP_UART_MODE, &value, sizeof(value));
+}
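With the 40 MHz AML_UART_CLK_SOURCE defined above and the driver's 4,000,000 baud operating speed (see aml_hci_proto further down), the register value works out to:

	/* (40000000 / 4000000) - 1 = 9, masked to the 12-bit divisor field */
	value = ((40000000 / 4000000) - 1) & 0x0FFF;	/* == 9 */
	value |= AML_UART_XMIT_EN | AML_UART_RECV_EN | AML_UART_TIMEOUT_INT_EN;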
+
+static int aml_start_chip(struct hci_dev *hdev)
+{
+ u32 value = 0;
+ int ret;
+
+ value = AML_MM_CTR_HARD_TRAS_EN;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_MEM_HARD_TRANS_EN,
+ &value, sizeof(value));
+ if (ret)
+ return ret;
+
+ /* controller hardware reset */
+ value = AML_CTR_CPU_RESET | AML_CTR_MAC_RESET | AML_CTR_PHY_RESET;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_HARDWARE_RESET,
+ AML_OP_HARDWARE_RST,
+ &value, sizeof(value));
+ return ret;
+}
+
+static int aml_send_firmware_segment(struct hci_dev *hdev,
+ u8 fw_type,
+ u8 *seg,
+ u32 seg_size,
+ u32 offset)
+{
+ u32 op_addr = 0;
+
+ if (fw_type == FW_ICCM)
+ op_addr = AML_OP_ICCM_RAM_BASE + offset;
+ else if (fw_type == FW_DCCM)
+ op_addr = AML_OP_DCCM_RAM_BASE + offset;
+
+ return aml_send_tci_cmd(hdev, AML_TCI_CMD_DOWNLOAD_BT_FW,
+ op_addr, (u32 *)seg, seg_size);
+}
+
+static int aml_send_firmware(struct hci_dev *hdev, u8 fw_type,
+ u8 *fw, u32 fw_size, u32 offset)
+{
+ u32 seg_size = 0;
+ u32 seg_off = 0;
+
+ if (fw_size > AML_FIRMWARE_MAX_SIZE) {
+ bt_dev_err(hdev,
+ "Firmware size %d kB is larger than the maximum of 512 kB. Aborting.",
+ fw_size);
+ return -EINVAL;
+ }
+ while (fw_size > 0) {
+ seg_size = (fw_size > AML_FIRMWARE_OPERATION_SIZE) ?
+ AML_FIRMWARE_OPERATION_SIZE : fw_size;
+ if (aml_send_firmware_segment(hdev, fw_type, (fw + seg_off),
+ seg_size, offset)) {
+ bt_dev_err(hdev, "Failed send firmware, type: %d, offset: 0x%x",
+ fw_type, offset);
+ return -EINVAL;
+ }
+ seg_off += seg_size;
+ fw_size -= seg_size;
+ offset += seg_size;
+ }
+ return 0;
+}
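The loop above slices the image into AML_FIRMWARE_OPERATION_SIZE (248-byte) TCI writes. A hypothetical 1000-byte image, for instance, would go out as four 248-byte segments plus one 8-byte tail, with the RAM offset advancing by the segment size after each write.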
+
+static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ const struct firmware *firmware = NULL;
+ struct aml_fw_len *fw_len = NULL;
+ u8 *iccm_start = NULL, *dccm_start = NULL;
+ u32 iccm_len, dccm_len;
+ u32 value = 0;
+ int ret = 0;
+
+ /* Enable firmware download event */
+ value = AML_EVT_EN;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_EVT_ENABLE,
+ &value, sizeof(value));
+ if (ret)
+ goto exit;
+
+ /* RAM power on */
+ value = AML_RAM_POWER_ON;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_RAM_POWER_CTR,
+ &value, sizeof(value));
+ if (ret)
+ goto exit;
+
+ /* Check RAM power status */
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_READ,
+ AML_OP_RAM_POWER_CTR, NULL, 0);
+ if (ret)
+ goto exit;
+
+ ret = request_firmware(&firmware, fw_name, &hdev->dev);
+ if (ret < 0) {
+ bt_dev_err(hdev, "Failed to load <%s>:(%d)", fw_name, ret);
+ goto exit;
+ }
+
+ fw_len = (struct aml_fw_len *)firmware->data;
+
+ /* Download ICCM */
+ iccm_start = (u8 *)(firmware->data) + sizeof(struct aml_fw_len)
+ + amldev->aml_dev_data->iccm_offset;
+ iccm_len = fw_len->iccm_len - amldev->aml_dev_data->iccm_offset;
+ ret = aml_send_firmware(hdev, FW_ICCM, iccm_start, iccm_len,
+ amldev->aml_dev_data->iccm_offset);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to send FW_ICCM (%d)", ret);
+ goto exit;
+ }
+
+ /* Download DCCM */
+ dccm_start = (u8 *)(firmware->data) + sizeof(struct aml_fw_len) + fw_len->iccm_len;
+ dccm_len = fw_len->dccm_len;
+ ret = aml_send_firmware(hdev, FW_DCCM, dccm_start, dccm_len,
+ amldev->aml_dev_data->dccm_offset);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to send FW_DCCM (%d)", ret);
+ goto exit;
+ }
+
+ /* Disable firmware download event */
+ value = 0;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_EVT_ENABLE,
+ &value, sizeof(value));
+ if (ret)
+ goto exit;
+
+exit:
+ release_firmware(firmware);
+ return ret;
+}
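Putting the offsets above together, the blob loaded via request_firmware() appears to be laid out as follows (a sketch inferred from this function, not a documented format):

	offset 0:            struct aml_fw_len { u32 iccm_len; u32 dccm_len; }
	offset 8:            ICCM image, iccm_len bytes (the first iccm_offset
	                     bytes are in ROM and are skipped)
	offset 8 + iccm_len: DCCM image, dccm_len bytes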
+
+static int aml_send_reset(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync_ev(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to send hci reset cmd (%d)", err);
+ return err;
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int aml_dump_fw_version(struct hci_dev *hdev)
+{
+ struct aml_tci_rsp *rsp = NULL;
+ struct sk_buff *skb;
+ u8 value[6] = {0};
+ u8 *fw_ver = NULL;
+ int err = 0;
+
+ skb = __hci_cmd_sync_ev(hdev, AML_BT_HCI_VENDOR_CMD, sizeof(value), value,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to get fw version (error: %d)", err);
+ return err;
+ }
+
+ rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp));
+ if (!rsp)
+ goto exit;
+
+ if (rsp->opcode != AML_BT_HCI_VENDOR_CMD || rsp->status != 0x00) {
+ bt_dev_err(hdev, "dump version, error response (0x%04X):(%d)",
+ rsp->opcode, rsp->status);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ fw_ver = (u8 *)rsp + AML_EVT_HEAD_SIZE;
+ bt_dev_info(hdev, "fw_version: date = %02x.%02x, number = 0x%02x%02x",
+ *(fw_ver + 1), *fw_ver, *(fw_ver + 3), *(fw_ver + 2));
+
+exit:
+ kfree_skb(skb);
+ return err;
+}
+
+static int aml_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct aml_tci_rsp *rsp = NULL;
+ struct sk_buff *skb;
+ int err = 0;
+
+ bt_dev_info(hdev, "set bdaddr (%pM)", bdaddr);
+ skb = __hci_cmd_sync_ev(hdev, AML_BT_HCI_VENDOR_CMD,
+ sizeof(bdaddr_t), bdaddr,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to set bdaddr (error: %d)", err);
+ return err;
+ }
+
+ rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp));
+ if (!rsp)
+ goto exit;
+
+ if (rsp->opcode != AML_BT_HCI_VENDOR_CMD || rsp->status != 0x00) {
+ bt_dev_err(hdev, "error response (0x%x):(%d)", rsp->opcode, rsp->status);
+ err = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ kfree_skb(skb);
+ return err;
+}
+
+static int aml_check_bdaddr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_bd_addr *paddr;
+ struct sk_buff *skb;
+ int err;
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to read bdaddr (error: %d)", err);
+ return err;
+ }
+
+ paddr = skb_pull_data(skb, sizeof(struct hci_rp_read_bd_addr));
+ if (!paddr)
+ goto exit;
+
+ if (!bacmp(&paddr->bdaddr, AML_BDADDR_DEFAULT)) {
+ bt_dev_info(hdev, "amlbt using default bdaddr (%pM)", &paddr->bdaddr);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
+ }
+
+exit:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int aml_config_rf(struct hci_dev *hdev, bool is_coex)
+{
+ u32 value = AML_RF_ANT_DOUBLE;
+
+ /* Use a single antenna when coexisting with Wi-Fi */
+ if (is_coex)
+ value = AML_RF_ANT_SINGLE;
+
+ return aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_RF_CFG,
+ &value, sizeof(value));
+}
+
+static int aml_parse_dt(struct aml_serdev *amldev)
+{
+ struct device *pdev = amldev->dev;
+
+ amldev->bt_en_gpio = devm_gpiod_get(pdev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(amldev->bt_en_gpio)) {
+ dev_err(pdev, "Failed to acquire enable gpios");
+ return PTR_ERR(amldev->bt_en_gpio);
+ }
+
+ if (device_property_read_string(pdev, "firmware-name",
+ &amldev->firmware_name)) {
+ dev_err(pdev, "Failed to acquire firmware path");
+ return -ENODEV;
+ }
+
+ amldev->bt_supply = devm_regulator_get(pdev, "vddio");
+ if (IS_ERR(amldev->bt_supply)) {
+ dev_err(pdev, "Failed to acquire regulator");
+ return PTR_ERR(amldev->bt_supply);
+ }
+
+ amldev->lpo_clk = devm_clk_get(pdev, NULL);
+ if (IS_ERR(amldev->lpo_clk)) {
+ dev_err(pdev, "Failed to acquire clock source");
+ return PTR_ERR(amldev->lpo_clk);
+ }
+
+ return 0;
+}
+
+static int aml_power_on(struct aml_serdev *amldev)
+{
+ int err;
+
+ err = regulator_enable(amldev->bt_supply);
+ if (err) {
+ dev_err(amldev->dev, "Failed to enable regulator: (%d)", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(amldev->lpo_clk);
+ if (err) {
+ dev_err(amldev->dev, "Failed to enable lpo clock: (%d)", err);
+ return err;
+ }
+
+ gpiod_set_value_cansleep(amldev->bt_en_gpio, 1);
+
+ /* Wait 20 ms for the Bluetooth controller to power on */
+ msleep(20);
+ return 0;
+}
+
+static int aml_power_off(struct aml_serdev *amldev)
+{
+ gpiod_set_value_cansleep(amldev->bt_en_gpio, 0);
+
+ clk_disable_unprepare(amldev->lpo_clk);
+
+ regulator_disable(amldev->bt_supply);
+
+ return 0;
+}
+
+static int aml_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ /* update controller baudrate */
+ if (aml_update_chip_baudrate(hu->hdev, speed) != 0) {
+ bt_dev_err(hu->hdev, "Failed to update baud rate");
+ return -EINVAL;
+ }
+
+ /* update local baudrate */
+ serdev_device_set_baudrate(hu->serdev, speed);
+
+ return 0;
+}
+
+/* Initialize protocol */
+static int aml_open(struct hci_uart *hu)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ struct aml_data *aml_data;
+ int err;
+
+ err = aml_parse_dt(amldev);
+ if (err)
+ return err;
+
+ if (!hci_uart_has_flow_control(hu)) {
+ bt_dev_err(hu->hdev, "no flow control");
+ return -EOPNOTSUPP;
+ }
+
+ aml_data = kzalloc(sizeof(*aml_data), GFP_KERNEL);
+ if (!aml_data)
+ return -ENOMEM;
+
+ skb_queue_head_init(&aml_data->txq);
+
+ hu->priv = aml_data;
+
+ return 0;
+}
+
+static int aml_close(struct hci_uart *hu)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ struct aml_data *aml_data = hu->priv;
+
+ skb_queue_purge(&aml_data->txq);
+ kfree_skb(aml_data->rx_skb);
+ kfree(aml_data);
+
+ hu->priv = NULL;
+
+ return aml_power_off(amldev);
+}
+
+static int aml_flush(struct hci_uart *hu)
+{
+ struct aml_data *aml_data = hu->priv;
+
+ skb_queue_purge(&aml_data->txq);
+
+ return 0;
+}
+
+static int aml_setup(struct hci_uart *hu)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ struct hci_dev *hdev = amldev->serdev_hu.hdev;
+ int err;
+
+ /* Setup bdaddr */
+ hdev->set_bdaddr = aml_set_bdaddr;
+
+ err = aml_power_on(amldev);
+ if (err)
+ return err;
+
+ err = aml_set_baudrate(hu, amldev->serdev_hu.proto->oper_speed);
+ if (err)
+ return err;
+
+ err = aml_download_firmware(hdev, amldev->firmware_name);
+ if (err)
+ return err;
+
+ err = aml_config_rf(hdev, amldev->aml_dev_data->is_coex);
+ if (err)
+ return err;
+
+ err = aml_start_chip(hdev);
+ if (err)
+ return err;
+
+ /* Wait 60 ms for the controller to start up */
+ msleep(60);
+
+ err = aml_dump_fw_version(hdev);
+ if (err)
+ return err;
+
+ err = aml_send_reset(hdev);
+ if (err)
+ return err;
+
+ err = aml_check_bdaddr(hdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int aml_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct aml_data *aml_data = hu->priv;
+
+ skb_queue_tail(&aml_data->txq, skb);
+
+ return 0;
+}
+
+static struct sk_buff *aml_dequeue(struct hci_uart *hu)
+{
+ struct aml_data *aml_data = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&aml_data->txq);
+
+ /* Prepend skb with frame type */
+ if (skb)
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ return skb;
+}
+
+static int aml_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct aml_data *aml_data = hu->priv;
+ int err;
+
+ aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
+ aml_recv_pkts,
+ ARRAY_SIZE(aml_recv_pkts));
+ if (IS_ERR(aml_data->rx_skb)) {
+ err = PTR_ERR(aml_data->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ aml_data->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static const struct hci_uart_proto aml_hci_proto = {
+ .id = HCI_UART_AML,
+ .name = "AML",
+ .init_speed = 115200,
+ .oper_speed = 4000000,
+ .open = aml_open,
+ .close = aml_close,
+ .setup = aml_setup,
+ .flush = aml_flush,
+ .recv = aml_recv,
+ .enqueue = aml_enqueue,
+ .dequeue = aml_dequeue,
+};
+
+static void aml_device_driver_shutdown(struct device *dev)
+{
+ struct aml_serdev *amldev = dev_get_drvdata(dev);
+
+ aml_power_off(amldev);
+}
+
+static int aml_serdev_probe(struct serdev_device *serdev)
+{
+ struct aml_serdev *amldev;
+ int err;
+
+ amldev = devm_kzalloc(&serdev->dev, sizeof(*amldev), GFP_KERNEL);
+ if (!amldev)
+ return -ENOMEM;
+
+ amldev->serdev_hu.serdev = serdev;
+ amldev->dev = &serdev->dev;
+ serdev_device_set_drvdata(serdev, amldev);
+
+ err = hci_uart_register_device(&amldev->serdev_hu, &aml_hci_proto);
+ if (err)
+ return dev_err_probe(amldev->dev, err,
+ "Failed to register hci uart device");
+
+ amldev->aml_dev_data = device_get_match_data(&serdev->dev);
+
+ return 0;
+}
+
+static void aml_serdev_remove(struct serdev_device *serdev)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&amldev->serdev_hu);
+}
+
+static const struct aml_device_data data_w155s2 = {
+ .iccm_offset = 256 * 1024,
+};
+
+static const struct aml_device_data data_w265s2 = {
+ .iccm_offset = 384 * 1024,
+};
+
+static const struct of_device_id aml_bluetooth_of_match[] = {
+ { .compatible = "amlogic,w155s2-bt", .data = &data_w155s2 },
+ { .compatible = "amlogic,w265s2-bt", .data = &data_w265s2 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, aml_bluetooth_of_match);
+
+static struct serdev_device_driver aml_serdev_driver = {
+ .probe = aml_serdev_probe,
+ .remove = aml_serdev_remove,
+ .driver = {
+ .name = "hci_uart_aml",
+ .of_match_table = aml_bluetooth_of_match,
+ .shutdown = aml_device_driver_shutdown,
+ },
+};
+
+int __init aml_init(void)
+{
+ serdev_device_driver_register(&aml_serdev_driver);
+
+ return hci_uart_register_proto(&aml_hci_proto);
+}
+
+int __exit aml_deinit(void)
+{
+ serdev_device_driver_unregister(&aml_serdev_driver);
+
+ return hci_uart_unregister_proto(&aml_hci_proto);
+}
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index dbfe34664633..8d2b5e7f0d6a 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
- ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 89d4c2224546..9286a5f40f55 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -326,7 +326,6 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
pm_runtime_get(bdev->dev);
- pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
return IRQ_HANDLED;
@@ -643,8 +642,8 @@ static int bcm_setup(struct hci_uart *hu)
* Allow the bootloader to set a valid address through the
* device tree.
*/
- if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+ if (hci_test_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR))
+ hci_set_quirk(hu->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
@@ -698,7 +697,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
@@ -710,7 +709,6 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
mutex_lock(&bcm_device_lock);
if (bcm->dev && bcm_device_exists(bcm->dev)) {
pm_runtime_get(bcm->dev->dev);
- pm_runtime_mark_last_busy(bcm->dev->dev);
pm_runtime_put_autosuspend(bcm->dev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -748,10 +746,8 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
skb = skb_dequeue(&bcm->txq);
- if (bdev) {
- pm_runtime_mark_last_busy(bdev->dev);
+ if (bdev)
pm_runtime_put_autosuspend(bdev->dev);
- }
mutex_unlock(&bcm_device_lock);
@@ -1068,17 +1064,17 @@ static struct clk *bcm_get_txco(struct device *dev)
struct clk *clk;
/* New explicit name */
- clk = devm_clk_get(dev, "txco");
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ clk = devm_clk_get_optional(dev, "txco");
+ if (clk)
return clk;
/* Deprecated name */
- clk = devm_clk_get(dev, "extclk");
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ clk = devm_clk_get_optional(dev, "extclk");
+ if (clk)
return clk;
/* Original code used no name at all */
- return devm_clk_get(dev, NULL);
+ return devm_clk_get_optional(dev, NULL);
}
static int bcm_get_resources(struct bcm_device *dev)
@@ -1093,21 +1089,12 @@ static int bcm_get_resources(struct bcm_device *dev)
return 0;
dev->txco_clk = bcm_get_txco(dev->dev);
-
- /* Handle deferred probing */
- if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER))
- return PTR_ERR(dev->txco_clk);
-
- /* Ignore all other errors as before */
if (IS_ERR(dev->txco_clk))
- dev->txco_clk = NULL;
-
- dev->lpo_clk = devm_clk_get(dev->dev, "lpo");
- if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER))
- return PTR_ERR(dev->lpo_clk);
+ return PTR_ERR(dev->txco_clk);
+ dev->lpo_clk = devm_clk_get_optional(dev->dev, "lpo");
if (IS_ERR(dev->lpo_clk))
- dev->lpo_clk = NULL;
+ return PTR_ERR(dev->lpo_clk);
/* Check if we accidentally fetched the lpo clock twice */
if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) {
@@ -1507,7 +1494,7 @@ static const struct dev_pm_ops bcm_pm_ops = {
static struct platform_driver bcm_driver = {
.probe = bcm_probe,
- .remove_new = bcm_remove,
+ .remove = bcm_remove,
.driver = {
.name = "hci_bcm",
.acpi_match_table = ACPI_PTR(bcm_acpi_match),
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 0c2f15235b4c..45e6d84224ee 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe
+ * Bluetooth HCI driver for Broadcom 4377/4378/4387/4388 devices attached via PCIe
*
* Copyright (C) The Asahi Linux Contributors
*/
@@ -17,7 +17,7 @@
#include <linux/pci.h>
#include <linux/printk.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -26,13 +26,16 @@ enum bcm4377_chip {
BCM4377 = 0,
BCM4378,
BCM4387,
+ BCM4388,
};
#define BCM4377_DEVICE_ID 0x5fa0
#define BCM4378_DEVICE_ID 0x5f69
#define BCM4387_DEVICE_ID 0x5f71
+#define BCM4388_DEVICE_ID 0x5f72
-#define BCM4377_TIMEOUT 1000
+#define BCM4377_TIMEOUT msecs_to_jiffies(1000)
+#define BCM4377_BOOT_TIMEOUT msecs_to_jiffies(5000)
/*
* These devices only support DMA transactions inside a 32bit window
@@ -417,7 +420,7 @@ struct bcm4377_ring_state {
* payloads_dma:DMA address for payload buffer
* events: pointer to array of completions if waiting is allowed
* msgids: bitmap to keep track of used message ids
- * lock: Spinlock to protect access to ring structurs used in the irq handler
+ * lock: Spinlock to protect access to ring structures used in the irq handler
*/
struct bcm4377_transfer_ring {
enum bcm4377_transfer_ring_id ring_id;
@@ -487,6 +490,7 @@ struct bcm4377_data;
* second window in BAR0
* has_bar0_core2_window2: Set to true if this chip requires the second core's
* second window to be configured
+ * bar2_offset: Offset to the start of the variables in BAR2
* clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
* vendor-specific subsystem control
* register has to be cleared
@@ -495,6 +499,10 @@ struct bcm4377_data;
* extended scanning
* broken_mws_transport_config: Set to true if the chip erroneously claims to
* support MWS Transport Configuration
+ * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags inside
+ * reserved bits of Primary/Secondary_PHY inside
+ * LE Extended Advertising Report events which
+ * have to be ignored
* send_calibration: Optional callback to send calibration data
* send_ptb: Callback to send "PTB" regulatory/calibration data
*/
@@ -506,6 +514,7 @@ struct bcm4377_hw {
u32 bar0_window1;
u32 bar0_window2;
u32 bar0_core2_window2;
+ u32 bar2_offset;
unsigned long has_bar0_core2_window2 : 1;
unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
@@ -513,6 +522,7 @@ struct bcm4377_hw {
unsigned long broken_ext_scan : 1;
unsigned long broken_mws_transport_config : 1;
unsigned long broken_le_coded : 1;
+ unsigned long broken_le_ext_adv_report_phy : 1;
int (*send_calibration)(struct bcm4377_data *bcm4377);
int (*send_ptb)(struct bcm4377_data *bcm4377,
@@ -716,7 +726,7 @@ static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
ring->events[msgid] = NULL;
}
- bitmap_release_region(ring->msgids, msgid, ring->n_entries);
+ bitmap_release_region(ring->msgids, msgid, 0);
unlock:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -830,8 +840,8 @@ static irqreturn_t bcm4377_irq(int irq, void *data)
struct bcm4377_data *bcm4377 = data;
u32 bootstage, rti_status;
- bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
- rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
+ bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
+ rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
if (bootstage != bcm4377->bootstage ||
rti_status != bcm4377->rti_status) {
@@ -1191,6 +1201,14 @@ static int bcm4387_send_calibration(struct bcm4377_data *bcm4377)
bcm4377->taurus_cal_size);
}
+static int bcm4388_send_calibration(struct bcm4377_data *bcm4377)
+{
+ /* BCM4388 always uses beamforming */
+ return __bcm4378_send_calibration(
+ bcm4377, bcm4377->taurus_beamforming_cal_blob,
+ bcm4377->taurus_beamforming_cal_size);
+}
+
static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377,
const char *suffix)
{
@@ -1417,7 +1435,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
+ hci_set_quirk(bcm4377->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
return 0;
@@ -1814,8 +1832,8 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377)
int ret = 0;
u32 bootstage, rti_status;
- bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
- rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
+ bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
+ rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
if (bootstage != 0) {
dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
@@ -1849,15 +1867,18 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377)
iowrite32(BCM4377_DMA_MASK,
bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);
- iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO);
- iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI);
- iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE);
+ iowrite32(lower_32_bits(fw_dma),
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_LO);
+ iowrite32(upper_32_bits(fw_dma),
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_HI);
+ iowrite32(fw->size,
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_SIZE);
iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);
dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");
ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
- BCM4377_TIMEOUT);
+ BCM4377_BOOT_TIMEOUT);
if (ret == 0) {
ret = -ETIMEDOUT;
goto out_dma_free;
@@ -1908,16 +1929,16 @@ static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");
/* allow access to the entire IOVA space again */
- iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO);
- iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI);
+ iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_LO);
+ iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_HI);
iowrite32(BCM4377_DMA_MASK,
- bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE);
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_SIZE);
/* setup "Converged IPC" context */
iowrite32(lower_32_bits(bcm4377->ctx_dma),
- bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO);
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_LO);
iowrite32(upper_32_bits(bcm4377->ctx_dma),
- bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI);
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_HI);
iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);
ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
@@ -2368,11 +2389,13 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->setup = bcm4377_hci_setup;
if (bcm4377->hw->broken_mws_transport_config)
- set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG);
if (bcm4377->hw->broken_ext_scan)
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
if (bcm4377->hw->broken_le_coded)
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
+ if (bcm4377->hw->broken_le_ext_adv_report_phy)
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY);
pci_set_drvdata(pdev, bcm4377);
hci_set_drvdata(hdev, bcm4377);
@@ -2477,9 +2500,25 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
.clear_pciecfg_subsystem_ctrl_bit19 = true,
.broken_mws_transport_config = true,
.broken_le_coded = true,
+ .broken_le_ext_adv_report_phy = true,
.send_calibration = bcm4387_send_calibration,
.send_ptb = bcm4378_send_ptb,
},
+
+ [BCM4388] = {
+ .id = 0x4388,
+ .otp_offset = 0x415c,
+ .bar2_offset = 0x200000,
+ .bar0_window1 = 0x18002000,
+ .bar0_window2 = 0x18109000,
+ .bar0_core2_window2 = 0x18106000,
+ .has_bar0_core2_window2 = true,
+ .broken_mws_transport_config = true,
+ .broken_le_coded = true,
+ .broken_le_ext_adv_report_phy = true,
+ .send_calibration = bcm4388_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
};
#define BCM4377_DEVID_ENTRY(id) \
@@ -2493,6 +2532,7 @@ static const struct pci_device_id bcm4377_devid_table[] = {
BCM4377_DEVID_ENTRY(4377),
BCM4377_DEVID_ENTRY(4378),
BCM4377_DEVID_ENTRY(4387),
+ BCM4377_DEVID_ENTRY(4388),
{},
};
MODULE_DEVICE_TABLE(pci, bcm4377_devid_table);
@@ -2507,7 +2547,7 @@ static struct pci_driver bcm4377_pci_driver = {
module_pci_driver(bcm4377_pci_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
-MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices");
+MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387/4388 devices");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_FIRMWARE("brcm/brcmbt4377*.bin");
MODULE_FIRMWARE("brcm/brcmbt4377*.ptb");
@@ -2515,3 +2555,5 @@ MODULE_FIRMWARE("brcm/brcmbt4378*.bin");
MODULE_FIRMWARE("brcm/brcmbt4378*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4387*.bin");
MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
+MODULE_FIRMWARE("brcm/brcmbt4388*.bin");
+MODULE_FIRMWARE("brcm/brcmbt4388*.ptb");
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 2a5a27d713f8..591abe6d63dd 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -25,7 +25,7 @@
#include <linux/ioctl.h>
#include <linux/skbuff.h>
#include <linux/bitrev.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -382,7 +382,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
}
if (skb_queue_empty(&bcsp->unack))
- del_timer(&bcsp->tbcsp);
+ timer_delete(&bcsp->tbcsp);
spin_unlock_irqrestore(&bcsp->unack.lock, flags);
@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
@@ -688,7 +691,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
/* Arrange to retransmit all messages in the relq. */
static void bcsp_timed_event(struct timer_list *t)
{
- struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp);
+ struct bcsp_struct *bcsp = timer_container_of(bcsp, t, tbcsp);
struct hci_uart *hu = bcsp->hu;
struct sk_buff *skb;
unsigned long flags;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 1d0cdf023243..ec017df8572c 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -25,7 +25,7 @@
#include <linux/signal.h>
#include <linux/ioctl.h>
#include <linux/skbuff.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ struct hci_dev *hdev = hu->hdev;
/* Check for error from previous call */
if (IS_ERR(skb))
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index c0436881a533..96e20a66ecd1 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -7,6 +7,8 @@
*/
#include <linux/acpi.h>
+#include <linux/bitrev.h>
+#include <linux/crc-ccitt.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -58,6 +60,7 @@ enum {
H5_TX_ACK_REQ, /* Pending ack to send */
H5_WAKEUP_DISABLE, /* Device cannot wake host */
H5_HW_FLOW_CONTROL, /* Use HW flow control */
+ H5_CRC, /* Use CRC */
};
struct h5 {
@@ -141,15 +144,15 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
static u8 h5_cfg_field(struct h5 *h5)
{
- /* Sliding window size (first 3 bits) */
- return h5->tx_win & 0x07;
+ /* Sliding window size (first 3 bits) and CRC request (fifth bit). */
+ return (h5->tx_win & 0x07) | 0x10;
}
static void h5_timed_event(struct timer_list *t)
{
const unsigned char sync_req[] = { 0x01, 0x7e };
unsigned char conf_req[3] = { 0x03, 0xfc };
- struct h5 *h5 = from_timer(h5, t, timer);
+ struct h5 *h5 = timer_container_of(h5, t, timer);
struct hci_uart *hu = h5->hu;
struct sk_buff *skb;
unsigned long flags;
@@ -197,7 +200,7 @@ static void h5_peer_reset(struct hci_uart *hu)
h5->state = H5_UNINITIALIZED;
- del_timer(&h5->timer);
+ timer_delete(&h5->timer);
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
@@ -213,7 +216,6 @@ static void h5_peer_reset(struct hci_uart *hu)
static int h5_open(struct hci_uart *hu)
{
struct h5 *h5;
- const unsigned char sync[] = { 0x01, 0x7e };
BT_DBG("hu %p", hu);
@@ -243,9 +245,11 @@ static int h5_open(struct hci_uart *hu)
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
- /* Send initial sync request */
- h5_link_control(hu, sync, sizeof(sync));
- mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+ /*
+ * Wait one jiffy because the UART layer won't set HCI_UART_PROTO_READY,
+ * which allows us to send link packets, until this function returns.
+ */
+ mod_timer(&h5->timer, jiffies + 1);
return 0;
}
@@ -254,7 +258,7 @@ static int h5_close(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
- del_timer_sync(&h5->timer);
+ timer_delete_sync(&h5->timer);
skb_queue_purge(&h5->unack);
skb_queue_purge(&h5->rel);
@@ -318,7 +322,7 @@ static void h5_pkt_cull(struct h5 *h5)
}
if (skb_queue_empty(&h5->unack))
- del_timer(&h5->timer);
+ timer_delete(&h5->timer);
unlock:
spin_unlock_irqrestore(&h5->unack.lock, flags);
@@ -360,8 +364,10 @@ static void h5_handle_internal_rx(struct hci_uart *hu)
h5_link_control(hu, conf_rsp, 2);
h5_link_control(hu, conf_req, 3);
} else if (memcmp(data, conf_rsp, 2) == 0) {
- if (H5_HDR_LEN(hdr) > 2)
+ if (H5_HDR_LEN(hdr) > 2) {
h5->tx_win = (data[2] & 0x07);
+ assign_bit(H5_CRC, &h5->flags, data[2] & 0x10);
+ }
BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
h5->state = H5_ACTIVE;
hci_uart_init_ready(hu);
@@ -425,7 +431,24 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
- h5_complete_rx_pkt(hu);
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+ u16 crc;
+ __be16 crc_be;
+
+ crc = crc_ccitt(0xffff, hdr, 4 + H5_HDR_LEN(hdr));
+ crc = bitrev16(crc);
+
+ crc_be = cpu_to_be16(crc);
+
+ if (memcmp(&crc_be, hdr + 4 + H5_HDR_LEN(hdr), 2) != 0) {
+ bt_dev_err(hu->hdev, "Received packet with invalid CRC");
+ h5_reset_rx(h5);
+ } else {
+ /* Remove CRC bytes */
+ skb_trim(h5->rx_skb, 4 + H5_HDR_LEN(hdr));
+ h5_complete_rx_pkt(hu);
+ }
return 0;
}
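
The H5 CRC added here is the CRC-CCITT variant BCSP uses: seeded with 0xffff, computed LSB-first over the four-byte header plus payload, then bit-reversed, and carried on the wire most-significant byte first — hence the cpu_to_be16() comparison on receive and the (crc >> 8) / (crc & 0xff) slip order on transmit further below. For the configuration field above: with tx_win = 4, h5_cfg_field() now yields (4 & 0x07) | 0x10 = 0x14 — window size in bits 0-2, CRC request in bit 4. A self-contained userspace sketch of the arithmetic, mirroring the kernel's crc_ccitt() and bitrev16() helpers (names here are illustrative, not kernel API):

	#include <stdint.h>
	#include <stddef.h>

	/* Reflected CRC-CCITT, polynomial 0x8408, as lib/crc-ccitt.c computes it. */
	static uint16_t crc_ccitt_sketch(uint16_t crc, const uint8_t *buf, size_t len)
	{
		while (len--) {
			crc ^= *buf++;
			for (int i = 0; i < 8; i++)
				crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
		}
		return crc;
	}

	/* Bit-reverse a 16-bit value, as bitrev16() does. */
	static uint16_t bitrev16_sketch(uint16_t x)
	{
		uint16_t r = 0;

		for (int i = 0; i < 16; i++)
			r |= ((x >> i) & 1) << (15 - i);
		return r;
	}

	/* Receiver-side check over a complete unslipped frame:
	 * 4-byte header, payload, then the CRC in big-endian order. */
	static int h5_crc_ok(const uint8_t *frame, size_t payload_len)
	{
		uint16_t crc = bitrev16_sketch(crc_ccitt_sketch(0xffff, frame,
								4 + payload_len));

		return frame[4 + payload_len] == (crc >> 8) &&
		       frame[5 + payload_len] == (crc & 0xff);
	}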
@@ -556,6 +579,7 @@ static void h5_reset_rx(struct h5 *h5)
h5->rx_func = h5_rx_delimiter;
h5->rx_pending = 0;
clear_bit(H5_RX_ESC, &h5->flags);
+ clear_bit(H5_CRC, &h5->flags);
}
static int h5_recv(struct hci_uart *hu, const void *data, int count)
@@ -592,7 +616,6 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
if (hu->serdev) {
pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
pm_runtime_put_autosuspend(&hu->serdev->dev);
}
@@ -634,7 +657,6 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
if (hu->serdev) {
pm_runtime_get_sync(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
pm_runtime_put_autosuspend(&hu->serdev->dev);
}
@@ -686,6 +708,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
struct h5 *h5 = hu->priv;
struct sk_buff *nskb;
u8 hdr[4];
+ u16 crc;
int i;
if (!valid_packet_type(pkt_type)) {
@@ -713,6 +736,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
/* Reliable packet? */
if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
hdr[0] |= 1 << 7;
+ hdr[0] |= (test_bit(H5_CRC, &h5->flags) && 1) << 6;
hdr[0] |= h5->tx_seq;
h5->tx_seq = (h5->tx_seq + 1) % 8;
}
@@ -732,6 +756,15 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
for (i = 0; i < len; i++)
h5_slip_one_byte(nskb, data[i]);
+ if (H5_HDR_CRC(hdr)) {
+ crc = crc_ccitt(0xffff, hdr, 4);
+ crc = crc_ccitt(crc, data, len);
+ crc = bitrev16(crc);
+
+ h5_slip_one_byte(nskb, (crc >> 8) & 0xff);
+ h5_slip_one_byte(nskb, crc & 0xff);
+ }
+
h5_slip_delim(nskb);
return nskb;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 999ccd5bb4f2..20baf2895dec 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -280,7 +280,6 @@ static irqreturn_t intel_irq(int irq, void *dev_id)
/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
pm_runtime_get(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
return IRQ_HANDLED;
@@ -371,7 +370,6 @@ static void intel_busy_work(struct work_struct *work)
list_for_each_entry(idev, &intel_device_list, list) {
if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
break;
}
@@ -660,7 +658,7 @@ static int intel_setup(struct hci_uart *hu)
*/
if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* With this Intel bootloader only the hardware variant and device
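
This is the first of many mechanical conversions in this series from open-coded bit operations on hdev->quirks to accessor helpers; behaviour is unchanged. The pattern (sketch):

	/* before */
	set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
	/* after */
	hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);

hci_test_quirk() is the read-side counterpart, as the hci_serdev.c and qca shutdown hunks below show.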
@@ -972,7 +970,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
@@ -1003,7 +1001,6 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
list_for_each_entry(idev, &intel_device_list, list) {
if (hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get_sync(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
break;
}
@@ -1029,12 +1026,12 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
- * control working inject that event here.
+ /* When the BTINTEL_HCI_OP_RESET command is issued to boot into
+ * the operational firmware, it will actually not send a command
+ * complete event. To keep the flow control working inject that
+ * event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hu->hdev, opcode);
}
@@ -1206,7 +1203,7 @@ static void intel_remove(struct platform_device *pdev)
static struct platform_driver intel_driver = {
.probe = intel_probe,
- .remove_new = intel_remove,
+ .remove = intel_remove,
.driver = {
.name = "hci_intel",
.acpi_match_table = ACPI_PTR(intel_acpi_match),
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 17a2f158a0df..d0adae3267b4 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
if (!skb) {
percpu_down_read(&hu->proto_lock);
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
skb = hu->proto->dequeue(hu);
percpu_up_read(&hu->proto_lock);
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!percpu_down_read_trylock(&hu->proto_lock))
return 0;
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags))
goto no_schedule;
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
@@ -488,7 +491,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
- hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
+ hu = kzalloc(sizeof(*hu), GFP_KERNEL);
if (!hu) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
@@ -507,6 +510,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
hu->alignment = 1;
hu->padding = 0;
+ /* Use serial port speed as oper_speed */
+ hu->oper_speed = tty->termios.c_ospeed;
+
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
@@ -582,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -591,7 +598,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is
* available.
*
- * Arguments: tty pointer to tty isntance data
+ * Arguments: tty pointer to tty instance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes
@@ -608,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return;
}
@@ -659,13 +667,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it.
@@ -704,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
+ set_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
err = hci_uart_register_dev(hu);
if (err) {
return err;
}
set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
return 0;
}
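
The new HCI_UART_PROTO_INIT flag opens a transmit window during protocol setup: the dequeue, wakeup, send and receive paths above now proceed when either flag is set, so a proto->open() implementation (such as the H5 change earlier, which defers its first sync request to a timer) can transmit before registration completes. The resulting state sequence, annotated (sketch of the hunk above):

	set_bit(HCI_UART_PROTO_INIT, &hu->flags);   /* TX paths usable during setup */

	err = hci_uart_register_dev(hu);            /* open()/timers may now send */
	if (err)
		return err;

	set_bit(HCI_UART_PROTO_READY, &hu->flags);  /* normal operation */
	clear_bit(HCI_UART_PROTO_INIT, &hu->flags);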
@@ -870,7 +882,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_MRVL
mrvl_init();
#endif
-
+#ifdef CONFIG_BT_HCIUART_AML
+ aml_init();
+#endif
return 0;
}
@@ -906,7 +920,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_MRVL
mrvl_deinit();
#endif
-
+#ifdef CONFIG_BT_HCIUART_AML
+ aml_deinit();
+#endif
tty_unregister_ldisc(&hci_uart_ldisc);
}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 4a0b5c3160c2..6f4e25917b86 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -305,7 +305,7 @@ static void ll_device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
/* may be called from two simultaneous tasklets */
static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
@@ -649,11 +649,11 @@ static int ll_setup(struct hci_uart *hu)
/* This means that there was an error getting the BD address
* during probe, so mark the device as having a bad address.
*/
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
if (err)
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* Operational speed if any */
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index e08222395772..8767522ec4c6 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;
- mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
- mrvl_recv_pkts,
- ARRAY_SIZE(mrvl_recv_pkts));
+ mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 97da0b2bfd17..1e65b541f8ad 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -116,11 +116,6 @@ struct hci_nokia_neg_evt {
#define SETUP_BAUD_RATE 921600
#define INIT_BAUD_RATE 120000
-struct hci_nokia_radio_hdr {
- u8 evt;
- u8 dlen;
-} __packed;
-
struct nokia_bt_dev {
struct hci_uart hu;
struct serdev_device *serdev;
@@ -444,7 +439,7 @@ static int nokia_setup(struct hci_uart *hu)
if (btdev->man_id == NOKIA_ID_BCM2048) {
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
dev_dbg(dev, "bcm2048 has invalid bluetooth address!");
}
@@ -506,7 +501,7 @@ static int nokia_close(struct hci_uart *hu)
return 0;
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct nokia_bt_dev *btdev = hu->priv;
@@ -629,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
- nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+ btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+ nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 0c9c9ee56592..888176b0faa9 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -28,10 +28,12 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
+#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/string_choices.h>
#include <linux/mutex.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -214,6 +216,7 @@ struct qca_power {
struct regulator_bulk_data *vreg_bulk;
int num_vregs;
bool vregs_on;
+ struct pwrseq_desc *pwrseq;
};
struct qca_serdev {
@@ -226,7 +229,7 @@ struct qca_serdev {
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
- const char *firmware_name;
+ const char *firmware_name[2];
};
static int qca_regulator_enable(struct qca_serdev *qcadev);
@@ -256,7 +259,18 @@ static const char *qca_get_firmware_name(struct hci_uart *hu)
if (hu->serdev) {
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
- return qsd->firmware_name;
+ return qsd->firmware_name[0];
+ } else {
+ return NULL;
+ }
+}
+
+static const char *qca_get_rampatch_name(struct hci_uart *hu)
+{
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ return qsd->firmware_name[1];
} else {
return NULL;
}
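
Reading "firmware-name" as a string array lets a board supply a second entry naming the rampatch alongside the firmware/NVM file: qca_get_firmware_name() returns element 0 and the new qca_get_rampatch_name() element 1 (NULL when absent, since the array is zero-initialized). A hypothetical devicetree fragment — the file names are illustrative, not mandated by the binding:

	bluetooth {
		compatible = "qcom,wcn6855-bt";
		/* [0] firmware/NVM, [1] rampatch */
		firmware-name = "hpnv21.bin", "hpbtfw21.tlv";
	};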
@@ -330,8 +344,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
else
__serial_clock_off(hu->tty);
- BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
- vote ? "true" : "false");
+ BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
+ str_true_false(vote));
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
@@ -460,7 +474,7 @@ static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
- struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+ struct qca_data *qca = timer_container_of(qca, t, tx_idle_timer);
struct hci_uart *hu = qca->hu;
unsigned long flags;
@@ -493,7 +507,7 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
- struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
+ struct qca_data *qca = timer_container_of(qca, t, wake_retrans_timer);
struct hci_uart *hu = qca->hu;
unsigned long flags, retrans_delay;
bool retransmit = false;
@@ -569,7 +583,7 @@ static int qca_open(struct hci_uart *hu)
if (!hci_uart_has_flow_control(hu))
return -EOPNOTSUPP;
- qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
+ qca = kzalloc(sizeof(*qca), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@@ -609,6 +623,7 @@ static int qca_open(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -852,7 +867,7 @@ static void device_woke_up(struct hci_uart *hu)
skb_queue_tail(&qca->txq, skb);
/* Switch timers and change state to HCI_IBS_TX_AWAKE */
- del_timer(&qca->wake_retrans_timer);
+ timer_delete(&qca->wake_retrans_timer);
idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
@@ -871,7 +886,7 @@ static void device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
-/* Enqueue frame for transmittion (padding, crc, etc) may be called from
+/* Enqueue frame for transmission (padding, crc, etc) may be called from
* two simultaneous tasklets.
*/
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
@@ -1040,8 +1055,7 @@ static void qca_controller_memdump(struct work_struct *work)
}
if (!qca_memdump) {
- qca_memdump = kzalloc(sizeof(struct qca_memdump_info),
- GFP_ATOMIC);
+ qca_memdump = kzalloc(sizeof(*qca_memdump), GFP_ATOMIC);
if (!qca_memdump) {
mutex_unlock(&qca->hci_memdump_lock);
return;
@@ -1058,7 +1072,7 @@ static void qca_controller_memdump(struct work_struct *work)
if (!seq_no) {
/* This is the first frame of memdump packet from
- * the controller, Disable IBS to recevie dump
+ * the controller, Disable IBS to receive dump
* with out any interruption, ideally time required for
* the controller to send the dump is 8 seconds. let us
* start timer to handle this asynchronous activity.
@@ -1090,6 +1104,7 @@ static void qca_controller_memdump(struct work_struct *work)
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
cancel_delayed_work(&qca->ctrl_memdump_timeout);
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ clear_bit(QCA_IBS_DISABLED, &qca->flags);
mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1249,6 +1264,7 @@ static const struct h4_recv_pkt qca_recv_pkts[] = {
{ H4_RECV_ACL, .recv = qca_recv_acl_data },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = qca_recv_event },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
@@ -1261,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
@@ -1352,6 +1368,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
/* Give the controller time to process the request */
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1438,6 +1455,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1480,6 +1498,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
* changing the baudrate of chip and host.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1514,6 +1533,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
error:
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1636,7 +1656,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
-static void qca_cmd_timeout(struct hci_dev *hdev)
+static void qca_reset(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1685,6 +1705,27 @@ static bool qca_wakeup(struct hci_dev *hdev)
return wakeup;
}
+static int qca_port_reopen(struct hci_uart *hu)
+{
+ int ret;
+
+ /* Now the device is in ready state to communicate with host.
+ * To sync host with device we need to reopen port.
+ * Without this, we will have RTS and CTS synchronization
+ * issues.
+ */
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hu->hdev, "failed to open port");
+ return ret;
+ }
+
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
static int qca_regulator_init(struct hci_uart *hu)
{
enum qca_btsoc_type soc_type = qca_soc_type(hu);
@@ -1696,6 +1737,7 @@ static int qca_regulator_init(struct hci_uart *hu)
* off the voltage regulator.
*/
qcadev = serdev_device_get_drvdata(hu->serdev);
+
if (!qcadev->bt_power->vregs_on) {
serdev_device_close(hu->serdev);
ret = qca_regulator_enable(qcadev);
@@ -1710,6 +1752,7 @@ static int qca_regulator_init(struct hci_uart *hu)
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1740,6 +1783,7 @@ static int qca_regulator_init(struct hci_uart *hu)
qca_set_speed(hu, QCA_INIT_SPEED);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1753,21 +1797,7 @@ static int qca_regulator_init(struct hci_uart *hu)
break;
}
- /* Now the device is in ready state to communicate with host.
- * To sync host with device we need to reopen port.
- * Without this, we will have RTS and CTS synchronization
- * issues.
- */
- serdev_device_close(hu->serdev);
- ret = serdev_device_open(hu->serdev);
- if (ret) {
- bt_dev_err(hu->hdev, "failed to open port");
- return ret;
- }
-
- hci_uart_set_flow_control(hu, false);
-
- return 0;
+ return qca_port_reopen(hu);
}
static int qca_power_on(struct hci_dev *hdev)
@@ -1785,6 +1815,7 @@ static int qca_power_on(struct hci_dev *hdev)
return 0;
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1792,6 +1823,7 @@ static int qca_power_on(struct hci_dev *hdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_QCA6390:
ret = qca_regulator_init(hu);
break;
@@ -1844,6 +1876,7 @@ static int qca_setup(struct hci_uart *hu)
unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
+ const char *rampatch_name = qca_get_rampatch_name(hu);
int ret;
struct qca_btsoc_version ver;
struct qca_serdev *qcadev;
@@ -1860,13 +1893,14 @@ static int qca_setup(struct hci_uart *hu)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
switch (soc_type) {
case QCA_QCA2066:
soc_name = "qca2066";
break;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1901,6 +1935,7 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1910,7 +1945,7 @@ retry:
case QCA_WCN7850:
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
- set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);
hci_set_aosp_capable(hdev);
@@ -1934,6 +1969,7 @@ retry:
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1952,12 +1988,12 @@ retry:
/* Setup patch / NVM configurations */
ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
- firmware_name);
+ firmware_name, rampatch_name);
if (!ret) {
clear_bit(QCA_IBS_DISABLED, &qca->flags);
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
- hu->hdev->cmd_timeout = qca_cmd_timeout;
+ hu->hdev->reset = qca_reset;
if (hu->serdev) {
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
@@ -2022,6 +2058,17 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = {
+ .soc_type = QCA_WCN3950,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 60000 },
+ { "vddrf", 155000 },
+ { "vddch0", 585000 },
+ },
+ .num_vregs = 4,
+};
+
static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
.soc_type = QCA_WCN3988,
.vregs = (struct qca_vreg []) {
@@ -2130,6 +2177,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
unsigned long flags;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
bool sw_ctrl_state;
+ struct qca_power *power;
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
@@ -2147,6 +2195,13 @@ static void qca_power_shutdown(struct hci_uart *hu)
return;
qcadev = serdev_device_get_drvdata(hu->serdev);
+ power = qcadev->bt_power;
+
+ if (power && power->pwrseq) {
+ pwrseq_power_off(power->pwrseq);
+ set_bit(QCA_BT_OFF, &qca->flags);
+ return;
+ }
switch (soc_type) {
case QCA_WCN3988:
@@ -2183,10 +2238,10 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
hu->hdev->hw_error = NULL;
- hu->hdev->cmd_timeout = NULL;
+ hu->hdev->reset = NULL;
- del_timer_sync(&qca->wake_retrans_timer);
- del_timer_sync(&qca->tx_idle_timer);
+ timer_delete_sync(&qca->wake_retrans_timer);
+ timer_delete_sync(&qca->tx_idle_timer);
/* Stop sending shutdown command if soc crashes. */
if (soc_type != QCA_ROME
@@ -2204,6 +2259,9 @@ static int qca_regulator_enable(struct qca_serdev *qcadev)
struct qca_power *power = qcadev->bt_power;
int ret;
+ if (power->pwrseq)
+ return pwrseq_power_on(power->pwrseq);
+
/* Already enabled */
if (power->vregs_on)
return 0;
@@ -2287,8 +2345,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- device_property_read_string(&serdev->dev, "firmware-name",
- &qcadev->firmware_name);
+ device_property_read_string_array(&serdev->dev, "firmware-name",
+ qcadev->firmware_name, ARRAY_SIZE(qcadev->firmware_name));
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
@@ -2303,6 +2361,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->btsoc_type = QCA_ROME;
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2310,12 +2369,48 @@ static int qca_serdev_probe(struct serdev_device *serdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_QCA6390:
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
if (!qcadev->bt_power)
return -ENOMEM;
+ break;
+ default:
+ break;
+ }
+ switch (qcadev->btsoc_type) {
+ case QCA_WCN6855:
+ case QCA_WCN7850:
+ case QCA_WCN6750:
+ if (!device_property_present(&serdev->dev, "enable-gpios")) {
+ /*
+ * Backward compatibility with old DT sources. If the
+ * node doesn't have the 'enable-gpios' property then
+ * let's use the power sequencer. Otherwise, let's
+ * drive everything ourselves.
+ */
+ qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
+ "bluetooth");
+
+ /*
+ * Some modules have BT_EN enabled via a hardware pull-up,
+ * meaning it is not defined in the DTS and is not controlled
+ * through the power sequence. In such cases, fall through
+ * to follow the legacy flow.
+ */
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ qcadev->bt_power->pwrseq = NULL;
+ else
+ break;
+ }
+ fallthrough;
+ case QCA_WCN3950:
+ case QCA_WCN3988:
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
qcadev->bt_power->dev = &serdev->dev;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
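
The probe-time power-control selection for the WCN6750/WCN6855/WCN7850 cases above reduces to the following flow (a summary of the code, not additional logic):

	/*
	 * no "enable-gpios" in DT:
	 *     devm_pwrseq_get(dev, "bluetooth") succeeds -> use power sequencer
	 *     devm_pwrseq_get() fails (e.g. BT_EN is a hardware pull-up)
	 *                                                -> fall through to the
	 *                                                   regulator/BT_EN flow
	 * "enable-gpios" present:
	 *     drive BT_EN and the regulators directly (legacy flow)
	 */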
@@ -2328,14 +2423,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en) &&
- (data->soc_type == QCA_WCN6750 ||
- data->soc_type == QCA_WCN6855)) {
- dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ if (IS_ERR(qcadev->bt_en))
+ return dev_err_probe(&serdev->dev,
+ PTR_ERR(qcadev->bt_en),
+ "failed to acquire BT_EN gpio\n");
- if (!qcadev->bt_en)
+ if (!qcadev->bt_en &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855))
power_ctrl_enabled = false;
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
@@ -2353,13 +2448,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
dev_err(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
+ break;
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err) {
- BT_ERR("wcn3990 serdev registration failed");
- return err;
+ case QCA_QCA6390:
+ if (dev_of_node(&serdev->dev)) {
+ qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
+ "bluetooth");
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ return PTR_ERR(qcadev->bt_power->pwrseq);
+ break;
}
- break;
+ fallthrough;
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
@@ -2372,31 +2471,24 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->bt_en)
power_ctrl_enabled = false;
- qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ qcadev->susclk = devm_clk_get_optional_enabled_with_rate(
+ &serdev->dev, NULL, SUSCLK_RATE_32KHZ);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
-
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
-
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err) {
- BT_ERR("Rome serdev registration failed");
- clk_disable_unprepare(qcadev->susclk);
- return err;
- }
+ }
+
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err) {
+ BT_ERR("serdev registration failed");
+ return err;
}
hdev = qcadev->serdev_hu.hdev;
if (power_ctrl_enabled) {
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
hdev->shutdown = qca_power_off;
}
@@ -2405,11 +2497,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
* be queried via hci. Same with the valid le states quirk.
*/
if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
- if (data->capabilities & QCA_CAP_VALID_LE_STATES)
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
}
return 0;
@@ -2428,15 +2520,11 @@ static void qca_serdev_remove(struct serdev_device *serdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- if (power->vregs_on) {
+ if (power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
- break;
- }
- fallthrough;
-
+ break;
default:
- if (qcadev->susclk)
- clk_disable_unprepare(qcadev->susclk);
+ break;
}
hci_uart_unregister_device(&qcadev->serdev_hu);
@@ -2450,15 +2538,27 @@ static void qca_serdev_shutdown(struct device *dev)
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
struct hci_uart *hu = &qcadev->serdev_hu;
struct hci_dev *hdev = hu->hdev;
- struct qca_data *qca = hu->priv;
const u8 ibs_wake_cmd[] = { 0xFD };
const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
if (qcadev->btsoc_type == QCA_QCA6390) {
- if (test_bit(QCA_BT_OFF, &qca->flags) ||
- !test_bit(HCI_RUNNING, &hdev->flags))
+ /* The purpose of sending the VSC is to reset the SoC into its
+ * initial state, which ensures that the next hdev->setup() call
+ * succeeds. If HCI_QUIRK_NON_PERSISTENT_SETUP is set, hdev->setup()
+ * can do its job regardless of the SoC state, so the VSC is not
+ * needed. If HCI_SETUP is set, hdev->setup() was never invoked and
+ * the SoC is already in its initial state, so again the VSC is not
+ * needed.
+ */
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_SETUP))
return;
+ /* The serdev must be in the open state when control reaches this
+ * point; this also fixes the use-after-free caused by flushing or
+ * writing to the serdev after it has been closed.
+ */
serdev_device_write_flush(serdev);
ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
sizeof(ibs_wake_cmd));
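
The two guards above amount to a small decision table for QCA6390 shutdown (sketch):

	/*
	 * HCI_QUIRK_NON_PERSISTENT_SETUP set -> hdev->setup() re-powers the
	 *                                       SoC anyway: skip the VSC
	 * HCI_SETUP still set                -> hdev->setup() never ran, the
	 *                                       SoC is in its initial state:
	 *                                       skip the VSC
	 * otherwise                          -> send the IBS wake byte (0xFD),
	 *                                       then the EDL reset VSC
	 */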
@@ -2537,10 +2637,10 @@ static int __maybe_unused qca_suspend(struct device *dev)
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
- del_timer(&qca->wake_retrans_timer);
+ timer_delete(&qca->wake_retrans_timer);
fallthrough;
case HCI_IBS_TX_AWAKE:
- del_timer(&qca->tx_idle_timer);
+ timer_delete(&qca->tx_idle_timer);
serdev_device_write_flush(hu->serdev);
cmd = HCI_IBS_SLEEP_IND;
@@ -2615,6 +2715,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
+ { .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950},
{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 89a22e9b3253..593d9cefbbf9 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -152,7 +152,7 @@ static int hci_uart_close(struct hci_dev *hdev)
* BT SOC is completely powered OFF during BT OFF, holding port
* open may drain the battery.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) {
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
@@ -358,13 +358,13 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
SET_HCIDEV_DEV(hdev, &hu->serdev->dev);
if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags))
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 00bf7ae82c5b..48ac7ca9334e 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -20,7 +20,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 12
+#define HCI_UART_MAX_PROTO 13
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -34,6 +34,7 @@
#define HCI_UART_AG6XX 9
#define HCI_UART_NOKIA 10
#define HCI_UART_MRVL 11
+#define HCI_UART_AML 12
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -89,6 +90,7 @@ struct hci_uart {
#define HCI_UART_REGISTERED 1
#define HCI_UART_PROTO_READY 2
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
+#define HCI_UART_PROTO_INIT 4
/* TX states */
#define HCI_UART_SENDING 1
@@ -119,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
-#ifdef CONFIG_BT_HCIUART_H4
-int h4_init(void);
-int h4_deinit(void);
-
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@@ -160,7 +158,11 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+#ifdef CONFIG_BT_HCIUART_H4
+int h4_init(void);
+int h4_deinit(void);
+
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
@@ -209,3 +211,8 @@ int ag6xx_deinit(void);
int mrvl_init(void);
int mrvl_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_AML
+int aml_init(void);
+int aml_deinit(void);
+#endif
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 28750a40f0ed..2fef08254d78 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -9,7 +9,7 @@
*/
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)
static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
- char buf[80];
+ const char *buf;
- snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
+ buf = "Controller Name: vhci_ctrl\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
+ buf = "Firmware Version: vhci_fw\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
+ buf = "Driver: vhci_drv\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Vendor: vhci\n");
+ buf = "Vendor: vhci\n";
skb_put_data(skb, buf, strlen(buf));
}
@@ -316,7 +316,7 @@ static inline void force_devcd_timeout(struct hci_dev *hdev,
unsigned int timeout)
{
#ifdef CONFIG_DEV_COREDUMP
- hdev->dump.timeout = msecs_to_jiffies(timeout * 1000);
+ hdev->dump.timeout = secs_to_jiffies(timeout);
#endif
}
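
secs_to_jiffies(n) is equivalent to msecs_to_jiffies(n * 1000); using it here (and in vhci_open() below) states the second-granularity intent directly and drops the manual multiplication. For instance (sketch):

	hdev->dump.timeout = secs_to_jiffies(timeout);  /* was msecs_to_jiffies(timeout * 1000) */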
@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = {
.write = force_devcd_write,
};
+static void vhci_debugfs_init(struct vhci_data *data)
+{
+ struct hci_dev *hdev = data->hdev;
+
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+ &msft_opcode_fops);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+ &aosp_capable_fops);
+
+ debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
+ &force_devcoredump_fops);
+}
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -415,17 +437,16 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->get_codec_config_data = vhci_get_codec_config_data;
hdev->wakeup = vhci_wakeup;
hdev->setup = vhci_setup;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
+ hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED);
/* bit 6 is for external configuration */
if (opcode & 0x40)
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
/* bit 7 is for raw device */
if (opcode & 0x80)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
-
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
@@ -435,22 +456,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
- debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
- &force_suspend_fops);
-
- debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
- &force_wakeup_fops);
-
- if (IS_ENABLED(CONFIG_BT_MSFTEXT))
- debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
- &msft_opcode_fops);
-
- if (IS_ENABLED(CONFIG_BT_AOSPEXT))
- debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
- &aosp_capable_fops);
-
- debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
- &force_devcoredump_fops);
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_init(data);
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
@@ -633,7 +640,7 @@ static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
- data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -647,11 +654,26 @@ static int vhci_open(struct inode *inode, struct file *file)
file->private_data = data;
nonseekable_open(inode, file);
- schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+ schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1));
return 0;
}
+static void vhci_debugfs_remove(struct hci_dev *hdev)
+{
+ debugfs_lookup_and_remove("force_suspend", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
+}
+
static int vhci_release(struct inode *inode, struct file *file)
{
struct vhci_data *data = file->private_data;
@@ -663,6 +685,8 @@ static int vhci_release(struct inode *inode, struct file *file)
hdev = data->hdev;
if (hdev) {
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_remove(hdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
@@ -681,7 +705,6 @@ static const struct file_operations vhci_fops = {
.poll = vhci_poll,
.open = vhci_open,
.release = vhci_release,
- .llseek = no_llseek,
};
static struct miscdevice vhci_miscdev = {
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 18208e152a36..6f1a37e85c6a 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -254,13 +254,9 @@ static void virtbt_rx_done(struct virtqueue *vq)
static int virtbt_probe(struct virtio_device *vdev)
{
- vq_callback_t *callbacks[VIRTBT_NUM_VQS] = {
- [VIRTBT_VQ_TX] = virtbt_tx_done,
- [VIRTBT_VQ_RX] = virtbt_rx_done,
- };
- const char *names[VIRTBT_NUM_VQS] = {
- [VIRTBT_VQ_TX] = "tx",
- [VIRTBT_VQ_RX] = "rx",
+ struct virtqueue_info vqs_info[VIRTBT_NUM_VQS] = {
+ [VIRTBT_VQ_TX] = { "tx", virtbt_tx_done },
+ [VIRTBT_VQ_RX] = { "rx", virtbt_rx_done },
};
struct virtio_bluetooth *vbt;
struct hci_dev *hdev;
@@ -288,8 +284,7 @@ static int virtbt_probe(struct virtio_device *vdev)
INIT_WORK(&vbt->rx, virtbt_rx_work);
- err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, callbacks,
- names, NULL);
+ err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, vqs_info, NULL);
if (err)
return err;
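
The parallel callbacks[]/names[] arrays are folded into a single struct virtqueue_info array, pairing each virtqueue's name with its completion callback in one initializer, and virtio_find_vqs() now takes that array directly. The structure looks roughly like this (paraphrased from include/linux/virtio_config.h; check the header for the authoritative definition):

	struct virtqueue_info {
		const char *name;         /* virtqueue name, e.g. "tx" */
		vq_callback_t *callback;  /* completion callback, may be NULL */
		bool ctx;                 /* request per-buffer context */
	};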
@@ -332,17 +327,17 @@ static int virtbt_probe(struct virtio_device *vdev)
hdev->setup = virtbt_setup_intel;
hdev->shutdown = virtbt_shutdown_generic;
hdev->set_bdaddr = virtbt_set_bdaddr_intel;
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
case VIRTIO_BT_CONFIG_VENDOR_REALTEK:
hdev->manufacturer = 93;
hdev->setup = virtbt_setup_realtek;
hdev->shutdown = virtbt_shutdown_generic;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
}
}
@@ -415,7 +410,6 @@ static const unsigned int virtbt_features[] = {
static struct virtio_driver virtbt_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.feature_table = virtbt_features,
.feature_table_size = ARRAY_SIZE(virtbt_features),
.id_table = virtbt_table,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 64cd2ee03aa3..fe7600283e70 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -87,9 +87,15 @@ config HISILICON_LPC
Driver to enable I/O access to devices attached to the Low Pin
Count bus on the HiSilicon Hip06/7 SoC.
+config IMX_AIPSTZ
+ tristate "Support for IMX Secure AHB to IP Slave bus (AIPSTZ) bridge"
+ depends on ARCH_MXC
+ help
+ Enable support for IMX AIPSTZ bridge.
+
config IMX_WEIM
bool "Freescale EIM DRIVER"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Driver for i.MX WEIM controller.
The WEIM(Wireless External Interface Module) works like a bus.
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index cddd4984d6af..8e693fe8a03a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_BT1_APB) += bt1-apb.o
obj-$(CONFIG_BT1_AXI) += bt1-axi.o
+obj-$(CONFIG_IMX_AIPSTZ) += imx-aipstz.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_INTEL_IXP4XX_EB) += intel-ixp4xx-eb.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index b715c8ab36e8..a65c79b08804 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
return -ENODEV;
}
map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(map)) {
dev_err(dev,
"could not find Integrator/AP system controller\n");
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ee29162da4ee..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
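
ATTRIBUTE_GROUPS(gisb_arb_sysfs) generates the group wrappers around the attrs array, and assigning the resulting array to driver.dev_groups lets the driver core create and remove the files around probe/remove with correct lifetime, replacing the manual sysfs_create_group() call deleted above. A sketch of what the macro expands to (abridged):

	static const struct attribute_group gisb_arb_sysfs_group = {
		.attrs = gisb_arb_sysfs_attrs,
	};
	static const struct attribute_group *gisb_arb_sysfs_groups[] = {
		&gisb_arb_sysfs_group,
		NULL,
	};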
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index 595fb22b73e0..7463124b6dd9 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -185,34 +185,13 @@ static int bt1_apb_request_rst(struct bt1_apb *apb)
return ret;
}
-static void bt1_apb_disable_clk(void *data)
-{
- struct bt1_apb *apb = data;
-
- clk_disable_unprepare(apb->pclk);
-}
-
static int bt1_apb_request_clk(struct bt1_apb *apb)
{
- int ret;
-
- apb->pclk = devm_clk_get(apb->dev, "pclk");
+ apb->pclk = devm_clk_get_enabled(apb->dev, "pclk");
if (IS_ERR(apb->pclk))
return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
"Couldn't get APB clock descriptor\n");
- ret = clk_prepare_enable(apb->pclk);
- if (ret) {
- dev_err(apb->dev, "Couldn't enable the APB clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
- if (ret) {
- dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
- return ret;
- }
-
apb->rate = clk_get_rate(apb->pclk);
if (!apb->rate) {
dev_err(apb->dev, "Invalid clock rate\n");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index 4007e7322cf2..a5254c73bf43 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -146,33 +146,14 @@ static int bt1_axi_request_rst(struct bt1_axi *axi)
return ret;
}
-static void bt1_axi_disable_clk(void *data)
-{
- struct bt1_axi *axi = data;
-
- clk_disable_unprepare(axi->aclk);
-}
-
static int bt1_axi_request_clk(struct bt1_axi *axi)
{
- int ret;
-
- axi->aclk = devm_clk_get(axi->dev, "aclk");
+ axi->aclk = devm_clk_get_enabled(axi->dev, "aclk");
if (IS_ERR(axi->aclk))
return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
"Couldn't get AXI Interconnect clock\n");
- ret = clk_prepare_enable(axi->aclk);
- if (ret) {
- dev_err(axi->dev, "Couldn't enable the AXI clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
- if (ret)
- dev_err(axi->dev, "Can't add AXI clock disable action\n");
-
- return ret;
+ return 0;
}
static int bt1_axi_request_irq(struct bt1_axi *axi)
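
Both Baikal-T1 bus drivers switch to devm_clk_get_enabled(), which acquires, prepares and enables the clock in one call and registers the disable/unprepare for driver detach, so the hand-rolled cleanup callbacks go away. The resulting shape of such a request helper (sketch under those assumptions):

	static int example_request_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk = devm_clk_get_enabled(dev, "aclk");

		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "Couldn't get bus clock\n");

		*out = clk;  /* disabled and unprepared automatically on detach */
		return 0;
	}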
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
index 5fbd0dbde24a..7816c0a728ef 100644
--- a/drivers/bus/fsl-mc/dpmcp.c
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -75,25 +75,3 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 4b68c84ef485..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -22,8 +22,8 @@ struct fsl_mc_child_objs {
struct fsl_mc_obj_desc *child_array;
};
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
- struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+ const struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
{
- struct fsl_mc_obj_desc *obj_desc = data;
+ const struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index b5e8c021fa1f..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
@@ -656,8 +635,3 @@ int __init fsl_mc_allocator_driver_init(void)
{
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
-
-void fsl_mc_allocator_driver_exit(void)
-{
- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 78b96cd63de9..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -80,11 +80,11 @@ static phys_addr_t mc_portal_base_phys_addr;
*
* Returns 1 on success, 0 otherwise.
*/
-static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+static int fsl_mc_bus_match(struct device *dev, const struct device_driver *drv)
{
const struct fsl_mc_device_id *id;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ const struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
/* When driver_override is set, only bind to the matching driver */
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,7 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- if (!ret && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -175,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
- mc_dev->obj_desc.type);
+ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
@@ -202,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
@@ -309,7 +310,7 @@ static struct attribute *fsl_mc_bus_attrs[] = {
ATTRIBUTE_GROUPS(fsl_mc_bus);
-struct bus_type fsl_mc_bus_type = {
+const struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
@@ -320,90 +321,90 @@ struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
- struct device_type *dev_type;
+ const struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
@@ -905,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
@@ -940,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
struct dprc_endpoint endpoint1 = {{ 0 }};
struct dprc_endpoint endpoint2 = {{ 0 }};
+ struct fsl_mc_bus *mc_bus;
int state, err = 0;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -963,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
strcpy(endpoint_desc.type, endpoint2.type);
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ if (endpoint)
+ return endpoint;
/*
* We know that the device has an endpoint because we verified by
@@ -970,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
* yet discovered by the fsl-mc bus, thus the lookup returned NULL.
* Force a rescan of the devices in this container and retry the lookup.
*/
- if (!endpoint) {
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- if (mutex_trylock(&mc_bus->scan_mutex)) {
- err = dprc_scan_objects(mc_bus_dev, true);
- mutex_unlock(&mc_bus->scan_mutex);
- }
-
- if (err < 0)
- return ERR_PTR(err);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
}
+ if (err < 0)
+ return ERR_PTR(err);
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
@@ -1102,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
@@ -1210,7 +1215,7 @@ static struct platform_driver fsl_mc_bus_driver = {
.acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
- .remove_new = fsl_mc_bus_remove,
+ .remove = fsl_mc_bus_remove,
.shutdown = fsl_mc_bus_remove,
};
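
Note on the fsl_mc_dma_configure() hunk above: dma_configure can be invoked from the IOMMU layer before a driver is bound, so the bound driver is snapshotted once with READ_ONCE() and NULL-checked instead of being dereferenced unconditionally. A minimal sketch of the pattern (bus and helper names hypothetical, not the fsl-mc code itself):

	/* Sketch: race-tolerant driver access in a bus ->dma_configure() op. */
	static int example_dma_configure(struct device *dev)
	{
		/* Snapshot once; dev->driver may be NULL or change under us. */
		const struct device_driver *drv = READ_ONCE(dev->driver);

		/* Only consult driver flags when a driver is actually bound. */
		if (drv && !to_example_driver(drv)->driver_managed_dma)
			return iommu_device_use_default_domain(dev);

		return 0;
	}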
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index b3520ea1b9f4..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -66,10 +66,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
/*
* Data Path Resource Container (DPRC) API
*/
@@ -631,12 +627,8 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
int __init fsl_mc_allocator_driver_init(void);
-void fsl_mc_allocator_driver_exit(void);
-
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 95b10a6cf307..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
@@ -263,23 +270,3 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
- int error;
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
- if (error < 0) {
- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..31037f41893e 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
@@ -248,7 +248,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
enum mc_cmd_status status;
unsigned long irq_flags = 0;
- if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ if (in_hardirq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
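
Note on the mc-sys.c hunks above: in_hardirq() is the modern spelling of in_irq(); the check still rejects portal I/O from hard interrupt context unless the portal was created for atomic use. The same flag also selects the lock type later in mc_send_command(), roughly as below (a sketch; field names as in struct fsl_mc_io):

	/* Sketch: atomic portals poll under a spinlock, others may sleep. */
	if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
		spin_lock_irqsave(&mc_io->spinlock, irq_flags);
	else
		mutex_lock(&mc_io->mutex);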
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 09340adbacc2..53dd1573e323 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -689,6 +689,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
- .remove_new = hisi_lpc_remove,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
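
Note: the .remove_new -> .remove renames here and in fsl-mc-bus.c above are part of the tree-wide conversion in which .remove took over the void-returning signature originally introduced as .remove_new. A sketch of the resulting shape (driver names hypothetical):

	/* Sketch: platform driver remove callback after the conversion. */
	static void foo_remove(struct platform_device *pdev)
	{
		/* void return: removal cannot fail, only tear down state */
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
	};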
diff --git a/drivers/bus/imx-aipstz.c b/drivers/bus/imx-aipstz.c
new file mode 100644
index 000000000000..5fdf377f5d06
--- /dev/null
+++ b/drivers/bus/imx-aipstz.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define IMX_AIPSTZ_MPR0 0x0
+
+struct imx_aipstz_config {
+ u32 mpr0;
+};
+
+struct imx_aipstz_data {
+ void __iomem *base;
+ const struct imx_aipstz_config *default_cfg;
+};
+
+static void imx_aipstz_apply_default(struct imx_aipstz_data *data)
+{
+ writel(data->default_cfg->mpr0, data->base + IMX_AIPSTZ_MPR0);
+}
+
+static const struct of_device_id imx_aipstz_match_table[] = {
+ { .compatible = "simple-bus", },
+ { }
+};
+
+static int imx_aipstz_probe(struct platform_device *pdev)
+{
+ struct imx_aipstz_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to allocate data memory\n");
+
+ data->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(data->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->base),
+ "failed to get/ioremap AC memory\n");
+
+ data->default_cfg = of_device_get_match_data(&pdev->dev);
+
+ imx_aipstz_apply_default(data);
+
+ dev_set_drvdata(&pdev->dev, data);
+
+ pm_runtime_set_active(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
+
+ return of_platform_populate(pdev->dev.of_node, imx_aipstz_match_table,
+ NULL, &pdev->dev);
+}
+
+static void imx_aipstz_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+}
+
+static int imx_aipstz_runtime_resume(struct device *dev)
+{
+ struct imx_aipstz_data *data = dev_get_drvdata(dev);
+
+ /* restore potentially lost configuration during domain power-off */
+ imx_aipstz_apply_default(data);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_aipstz_pm_ops = {
+ RUNTIME_PM_OPS(NULL, imx_aipstz_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+/*
+ * following configuration is equivalent to:
+ * masters 0-7 => trusted for R/W + use AHB's HPROT[1] to det. privilege
+ */
+static const struct imx_aipstz_config imx8mp_aipstz_default_cfg = {
+ .mpr0 = 0x77777777,
+};
+
+static const struct of_device_id imx_aipstz_of_ids[] = {
+ { .compatible = "fsl,imx8mp-aipstz", .data = &imx8mp_aipstz_default_cfg },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_aipstz_of_ids);
+
+static struct platform_driver imx_aipstz_of_driver = {
+ .probe = imx_aipstz_probe,
+ .remove = imx_aipstz_remove,
+ .driver = {
+ .name = "imx-aipstz",
+ .of_match_table = imx_aipstz_of_ids,
+ .pm = pm_ptr(&imx_aipstz_pm_ops),
+ },
+};
+module_platform_driver(imx_aipstz_of_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IMX secure AHB to IP Slave bus (AIPSTZ) bridge driver");
+MODULE_AUTHOR("Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>");
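
Note on the new imx-aipstz.c driver above: MPR0 packs one 4-bit access-control field per bus master, which is why the i.MX8MP default of 0x77777777 configures masters 0-7 identically. A hedged sketch of composing such a value (the per-nibble layout and shift order are assumptions inferred from the default, not taken from the reference manual):

	/* Sketch: replicate one 4-bit master configuration across MPR0. */
	static u32 aipstz_mpr0_fill(u8 cfg)
	{
		u32 val = 0;
		int m;

		for (m = 0; m < 8; m++)
			val |= (u32)(cfg & 0xf) << (m * 4);

		return val;	/* aipstz_mpr0_fill(0x7) == 0x77777777 */
	}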
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 837bf9d51c6e..83d623d97f5f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -282,22 +282,18 @@ static int weim_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
/* get the clock */
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
/* parse the device node */
ret = weim_parse_dt(pdev);
if (ret)
- clk_disable_unprepare(clk);
- else
- dev_info(&pdev->dev, "Driver registered.\n");
+ return ret;
- return ret;
+ dev_info(&pdev->dev, "Driver registered.\n");
+
+ return 0;
}
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
index 577965f95fda..512da7482acc 100644
--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -11,7 +11,7 @@
#include "../common.h"
-extern struct bus_type mhi_ep_bus_type;
+extern const struct bus_type mhi_ep_bus_type;
#define MHI_REG_OFFSET 0x100
#define BHI_REG_OFFSET 0x200
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index f8f674adf1d4..3c208b5c8446 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t tr_len, read_offset, write_offset;
+ size_t tr_len, read_offset;
struct mhi_ep_buf_info buf_info = {};
u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
- bool tr_done = false;
void *buf_addr;
- u32 buf_left;
int ret;
- buf_left = len;
-
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
- tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ tr_len = min(len, mhi_chan->tre_bytes_left);
} else {
mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
mhi_chan->tre_bytes_left = mhi_chan->tre_size;
- tr_len = min(buf_left, mhi_chan->tre_size);
+ tr_len = min(len, mhi_chan->tre_size);
}
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
- write_offset = len - buf_left;
- buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr;
buf_info.size = tr_len;
buf_info.cb = mhi_ep_read_completion;
buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_buf_addr;
}
- buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- if (!mhi_chan->tre_bytes_left) {
- if (MHI_TRE_DATA_GET_IEOT(el))
- tr_done = true;
-
+ if (!mhi_chan->tre_bytes_left)
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
- }
- } while (buf_left && !tr_done);
+ /* Read until the transfer ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
return 0;
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- return ret;
- }
-
- /* Read until the ring becomes empty */
- } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ return ret;
+ }
}
return 0;
@@ -1481,14 +1468,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
sizeof(struct mhi_ring_element), 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!mhi_cntrl->ev_ring_el_cache) {
ret = -ENOMEM;
goto err_free_cmd;
}
mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!mhi_cntrl->tre_buf_cache) {
ret = -ENOMEM;
goto err_destroy_ev_ring_el_cache;
@@ -1507,7 +1494,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
- mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", WQ_PERCPU, 0);
if (!mhi_cntrl->wq) {
ret = -ENOMEM;
goto err_destroy_ring_item_cache;
@@ -1694,10 +1681,10 @@ static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
mhi_dev->name);
}
-static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+static int mhi_ep_match(struct device *dev, const struct device_driver *drv)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
- struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
const struct mhi_device_id *id;
/*
@@ -1716,7 +1703,7 @@ static int mhi_ep_match(struct device *dev, struct device_driver *drv)
return 0;
};
-struct bus_type mhi_ep_bus_type = {
+const struct bus_type mhi_ep_bus_type = {
.name = "mhi_ep",
.dev_name = "mhi_ep",
.match = mhi_ep_match,
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index aeb53b2c34a8..26357ee68dee 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
}
old_offset = ring->rd_offset;
- mhi_ep_ring_inc_index(ring);
dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
+
+ ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ if (ret)
+ return ret;
+
+ mhi_ep_ring_inc_index(ring);
/* Update rp in ring context */
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
- buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
- buf_info.dev_addr = el;
- buf_info.size = sizeof(*el);
-
- return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ return ret;
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
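
Note on the mhi_ep_ring_add_element() hunk above: the element is now copied to the host ring with write_sync() before rd_offset is advanced and the new rp is published in the ring context, so a failed write no longer leaves the indices pointing past an element the host never received. The resulting order, condensed (names as in the hunk):

	/* Sketch: write the element first, publish the pointer on success. */
	ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);	/* copy to host */
	if (ret)
		return ret;				/* ring state untouched */

	mhi_ep_ring_inc_index(ring);			/* advance rd_offset */
	/* ...then rp is written back to the ring context for the host. */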
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index dedd29ca8db3..205d83ac069f 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
- bhi_vec->dma_addr = mhi_buf->dma_addr;
- bhi_vec->size = mhi_buf->len;
+ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
+ bhi_vec->size = cpu_to_le64(mhi_buf->len);
}
dev_dbg(dev, "BHIe programming for RDDM\n");
@@ -82,9 +82,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
* other cores to shutdown while we're collecting RDDM buffer. After
* returning from this function, we expect the device to reset.
*
- * Normaly, we read/write pm_state only after grabbing the
+ * Normally, we read/write pm_state only after grabbing the
* pm_lock, since we're in a panic, skipping it. Also there is no
- * gurantee that this state change would take effect since
+ * guarantee that this state change would take effect since
* we're setting it w/o grabbing pm_lock
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
@@ -177,6 +177,36 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ int ret, i;
+ u32 val;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+}
+
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
@@ -226,24 +256,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
}
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
- dma_addr_t dma_addr,
- size_t size)
+ const struct mhi_buf *mhi_buf)
{
- u32 tx_status, val, session_id;
- int i, ret;
- void __iomem *base = mhi_cntrl->bhi;
- rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct {
- char *name;
- u32 offset;
- } error_reg[] = {
- { "ERROR_CODE", BHI_ERRCODE },
- { "ERROR_DBG1", BHI_ERRDBG1 },
- { "ERROR_DBG2", BHI_ERRDBG2 },
- { "ERROR_DBG3", BHI_ERRDBG3 },
- { NULL },
- };
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ u32 tx_status, session_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -255,11 +274,9 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
- upper_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
- lower_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -274,18 +291,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
if (tx_status == BHI_STATUS_ERROR) {
dev_err(dev, "Image transfer failed\n");
- read_lock_bh(pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- for (i = 0; error_reg[i].name; i++) {
- ret = mhi_read_reg(mhi_cntrl, base,
- error_reg[i].offset, &val);
- if (ret)
- break;
- dev_err(dev, "Reg: %s value: 0x%x\n",
- error_reg[i].name, val);
- }
- }
- read_unlock_bh(pm_lock);
+ mhi_fw_load_error_dump(mhi_cntrl);
goto invalid_pm_state;
}
@@ -296,6 +302,16 @@ invalid_pm_state:
return -EIO;
}
+static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
@@ -310,6 +326,45 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
kfree(image_info);
}
+static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entry */
+ img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+
+ mhi_buf->len = alloc_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ &mhi_buf->dma_addr, GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ img_info->bhi_vec = NULL;
+ img_info->entries = 1;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ kfree(mhi_buf);
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
@@ -357,6 +412,7 @@ error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(img_info->mhi_buf);
error_alloc_mhi_buf:
kfree(img_info);
@@ -364,9 +420,9 @@ error_alloc_mhi_buf:
return -ENOMEM;
}
-static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
- const u8 *buf, size_t remainder,
- struct image_info *img_info)
+static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
+ const u8 *buf, size_t remainder,
+ struct image_info *img_info)
{
size_t to_cpy;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
@@ -375,8 +431,8 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
while (remainder) {
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
- bhi_vec->dma_addr = mhi_buf->dma_addr;
- bhi_vec->size = to_cpy;
+ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
+ bhi_vec->size = cpu_to_le64(to_cpy);
buf += to_cpy;
remainder -= to_cpy;
@@ -385,15 +441,61 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
+static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_download) {
+ return MHI_FW_LOAD_FBC;
+ } else {
+ if (mhi_cntrl->seg_len)
+ return MHI_FW_LOAD_BHIE;
+ else
+ return MHI_FW_LOAD_BHI;
+ }
+}
+
+static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ /* Load the firmware into BHI vec table */
+ memcpy(image->mhi_buf->buf, fw_data, size);
+
+ ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhi_buffer(mhi_cntrl, image);
+
+ return ret;
+}
+
+static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image);
+
+ ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhie_table(mhi_cntrl, image);
+
+ return ret;
+}
+
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_fw_load_type fw_load_type;
enum mhi_pm_state new_state;
const char *fw_name;
const u8 *fw_data;
- void *buf;
- dma_addr_t dma_addr;
size_t size, fw_sz;
int ret;
@@ -452,21 +554,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
fw_sz = firmware->size;
skip_req_fw:
- buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
- GFP_KERNEL);
- if (!buf) {
- release_firmware(firmware);
- goto error_fw_load;
- }
-
- /* Download image using BHI */
- memcpy(buf, fw_data, size);
- ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
- dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+ fw_load_type = mhi_fw_load_type_get(mhi_cntrl);
+ if (fw_load_type == MHI_FW_LOAD_BHIE)
+ ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size);
+ else
+ ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size);
/* Error or in EDL mode, we're done */
if (ret) {
- dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n",
+ fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "",
+ ret);
release_firmware(firmware);
goto error_fw_load;
}
@@ -485,7 +583,7 @@ skip_req_fw:
* If we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
- if (mhi_cntrl->fbc_download) {
+ if (fw_load_type == MHI_FW_LOAD_FBC) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
@@ -493,7 +591,7 @@ skip_req_fw:
}
/* Load the firmware into BHIE vec table */
- mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
@@ -510,7 +608,7 @@ fw_load_ready_state:
return;
error_ready_state:
- if (mhi_cntrl->fbc_download) {
+ if (mhi_cntrl->fbc_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
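
Note on the boot.c hunks above: with struct bhi_vec_entry switched to __le64 fields (see the internal.h hunk further down), every producer must byte-swap on big-endian hosts, hence the cpu_to_le64() calls in both mhi_rddm_prepare() and mhi_firmware_copy_bhie(). The typed fields also let sparse catch a missed conversion at build time instead of it failing only on big-endian machines:

	/* Sketch: the __le64 annotation makes the byte order explicit. */
	bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);	/* ok */
	bhi_vec->size = mhi_buf->len;		/* sparse: incorrect type */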
diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c
index cfec7811dfbb..39e45748a24c 100644
--- a/drivers/bus/mhi/host/debugfs.c
+++ b/drivers/bus/mhi/host/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include "internal.h"
static int mhi_debugfs_states_show(struct seq_file *m, void *d)
@@ -22,7 +23,7 @@ static int mhi_debugfs_states_show(struct seq_file *m, void *d)
mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_cntrl->wake_set ? "true" : "false");
+ str_true_false(mhi_cntrl->wake_set));
/* counters */
seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
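
Note on the debugfs.c hunk above: str_true_false() comes from <linux/string_choices.h> and simply maps a bool to a fixed string pair; its definition is equivalent to:

	/* Sketch: the helper replacing the open-coded ternary. */
	static inline const char *str_true_false(bool v)
	{
		return v ? "true" : "false";
	}

Sibling helpers such as str_yes_no() and str_enabled_disabled() follow the same shape.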
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 44f934981de8..099be8dd1900 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -127,6 +127,30 @@ static ssize_t soc_reset_store(struct device *dev,
}
static DEVICE_ATTR_WO(soc_reset);
+static ssize_t trigger_edl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = mhi_cntrl->edl_trigger(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(trigger_edl);
+
static struct attribute *mhi_dev_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_oem_pk_hash.attr,
@@ -152,7 +176,7 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
return 0;
}
-void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
@@ -167,10 +191,9 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
-int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
@@ -197,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
- dev_err(dev, "irq %d not available for event ring\n",
+ dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
mhi_event->irq);
ret = -EINVAL;
goto error_request;
@@ -208,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
irq_flags,
"mhi", mhi_event);
if (ret) {
- dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
@@ -230,7 +253,7 @@ error_request:
return ret;
}
-void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+static void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
@@ -275,7 +298,7 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
mhi_cntrl->mhi_ctxt = NULL;
}
-int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+static int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
struct mhi_ctxt *mhi_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
@@ -517,11 +540,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
dev_dbg(dev, "Initializing MHI registers\n");
/* Read channel db offset */
- ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
- if (ret) {
- dev_err(dev, "Unable to read CHDBOFF register\n");
- return -EIO;
- }
+ ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
+ if (ret)
+ return ret;
if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
@@ -1018,6 +1039,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
if (ret)
goto err_release_dev;
+ if (mhi_cntrl->edl_trigger) {
+ ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr);
+ if (ret)
+ goto err_release_dev;
+ }
+
mhi_cntrl->mhi_dev = mhi_dev;
mhi_create_debugfs(mhi_cntrl);
@@ -1051,6 +1078,9 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
mhi_deinit_free_irq(mhi_cntrl);
mhi_destroy_debugfs(mhi_cntrl);
+ if (mhi_cntrl->edl_trigger)
+ sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr);
+
destroy_workqueue(mhi_cntrl->hiprio_wq);
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_event);
@@ -1113,7 +1143,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
- if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
&bhie_off);
if (ret) {
@@ -1411,10 +1441,10 @@ static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
mhi_dev->name);
}
-static int mhi_match(struct device *dev, struct device_driver *drv)
+static int mhi_match(struct device *dev, const struct device_driver *drv)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
- struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_driver *mhi_drv = to_mhi_driver(drv);
const struct mhi_device_id *id;
/*
@@ -1433,7 +1463,7 @@ static int mhi_match(struct device *dev, struct device_driver *drv)
return 0;
};
-struct bus_type mhi_bus_type = {
+const struct bus_type mhi_bus_type = {
.name = "mhi",
.dev_name = "mhi",
.match = mhi_match,
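
Note on the init.c hunks above: the trigger_edl sysfs file is created only when the controller registers an edl_trigger() callback, so the file's presence advertises the capability, and a write of any nonzero value invokes the callback. A hedged sketch of a controller opting in (callback body hypothetical):

	/* Sketch: controller driver enabling the trigger_edl attribute. */
	static int my_edl_trigger(struct mhi_controller *mhi_cntrl)
	{
		/* e.g. ring the device's EDL doorbell with a magic cookie */
		return 0;
	}

	...
	mhi_cntrl->edl_trigger = my_edl_trigger;
	ret = mhi_register_controller(mhi_cntrl, config);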
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index aaad40a07f69..7937bb1f742c 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -9,7 +9,7 @@
#include "../common.h"
-extern struct bus_type mhi_bus_type;
+extern const struct bus_type mhi_bus_type;
/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
@@ -25,8 +25,15 @@ struct mhi_ctxt {
};
struct bhi_vec_entry {
- u64 dma_addr;
- u64 size;
+ __le64 dma_addr;
+ __le64 size;
+};
+
+enum mhi_fw_load_type {
+ MHI_FW_LOAD_BHI, /* BHI only in PBL */
+ MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
+ MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
+ MHI_FW_LOAD_MAX,
};
enum mhi_ch_state_type {
@@ -163,6 +170,8 @@ enum mhi_pm_state {
MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
(MHI_PM_M3_ENTER | MHI_PM_M3))
+#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \
+ (pm_state >= MHI_PM_SYS_ERR_FAIL))
#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
@@ -255,7 +264,7 @@ struct mhi_chan {
/*
* Important: When consuming, increment tre_ring first and when
* releasing, decrement buf_ring first. If tre_ring has space, buf_ring
- * is guranteed to have space so we do not need to check both rings.
+ * is guaranteed to have space so we do not need to check both rings.
*/
struct mhi_ring buf_ring;
struct mhi_ring tre_ring;
@@ -376,19 +385,12 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
-int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
-void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
-int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
-void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
-int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan, unsigned int flags);
-
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -403,6 +405,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 15d657af9b5b..861551274319 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -512,6 +512,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
wake_up_all(&mhi_cntrl->state_event);
}
break;
@@ -602,7 +603,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
{
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
struct mhi_ring_element *local_rp, *ev_tre;
- void *dev_rp;
+ void *dev_rp, *next_rp;
struct mhi_buf_info *buf_info;
u16 xfer_len;
@@ -621,6 +622,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
result.dir = mhi_chan->dir;
local_rp = tre_ring->rp;
+
+ next_rp = local_rp + 1;
+ if (next_rp >= tre_ring->base + tre_ring->len)
+ next_rp = tre_ring->base;
+ if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points to an unexpected TRE\n");
+ break;
+ }
+
while (local_rp != dev_rp) {
buf_info = buf_ring->rp;
/* If it's the last TRE, get length from the event */
@@ -1181,25 +1192,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
-{
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_buf_info buf_info = { };
-
- buf_info.p_addr = mhi_buf->dma_addr;
- buf_info.cb_buf = mhi_buf;
- buf_info.pre_mapped = true;
- buf_info.len = len;
-
- if (unlikely(mhi_chan->pre_alloc))
- return -EINVAL;
-
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
-}
-EXPORT_SYMBOL_GPL(mhi_queue_dma);
-
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_buf_info *info, enum mhi_flags flags)
{
@@ -1207,11 +1199,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_ring_element *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
- int ret;
+ int ret = 0;
/* Protect accesses for reading and incrementing WP */
write_lock_bh(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1229,10 +1226,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret) {
- write_unlock_bh(&mhi_chan->lock);
- return ret;
- }
+ if (ret)
+ goto out;
}
eob = !!(flags & MHI_EOB);
@@ -1250,9 +1245,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
+out:
write_unlock_bh(&mhi_chan->lock);
- return 0;
+ return ret;
}
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
@@ -1450,7 +1446,7 @@ exit_unprepare_channel:
mutex_unlock(&mhi_chan->mutex);
}
-int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+static int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
@@ -1691,3 +1687,19 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
+
+int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ void __iomem *base = mhi_cntrl->regs;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset);
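
Note on the main.c hunks above: mhi_get_channel_doorbell_offset() exports the CHDBOFF read that mhi_init_mmio() previously open-coded, letting controller drivers locate the channel doorbell array themselves. A hedged usage sketch (the 8-byte per-channel stride matches the bounds check in mhi_init_mmio()):

	/* Sketch: caller-side use of the new export. */
	u32 chdb_off;
	int ret;

	ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &chdb_off);
	if (ret)
		return ret;
	/* channel N's doorbell register pair lives at regs + chdb_off + 8 * N */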
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 51639bfcfec7..e3bc737313a2 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -26,28 +26,42 @@
/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES 0x1269
#define PCI_VENDOR_ID_QUECTEL 0x1eac
+#define PCI_VENDOR_ID_NETPRISMA 0x203e
+
+#define MHI_EDL_DB 91
+#define MHI_EDL_COOKIE 0xEDEDEDED
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
+ * @vf_config: MHI controller configuration for Virtual function (optional)
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
+ * @edl_trigger: capable of triggering EDL mode in the device (if supported)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @vf_dma_data_width: DMA transfer word size for VFs (optional)
* @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
+ * @no_m3: M3 not supported
+ * @reset_on_remove: Set true for devices that require a SoC reset during driver removal
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
+ const struct mhi_controller_config *vf_config;
const char *name;
const char *fw;
const char *edl;
+ bool edl_trigger;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int vf_dma_data_width;
unsigned int mru_default;
bool sideband_wake;
+ bool no_m3;
+ bool reset_on_remove;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -239,6 +253,74 @@ struct mhi_pci_dev_info {
.channel = ch_num, \
}
+static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
+};
+
+static struct mhi_event_config mhi_qcom_qdu100_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* SAHARA dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 256),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 256),
+ MHI_EVENT_CONFIG_SW_DATA(4, 256),
+ /* Software IP channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(5, 512),
+ MHI_EVENT_CONFIG_SW_DATA(6, 512),
+ MHI_EVENT_CONFIG_SW_DATA(7, 512),
+};
+
+static const struct mhi_controller_config mhi_qcom_qdu100_config = {
+ .max_channels = 128,
+ .timeout_ms = 120000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
+ .ch_cfg = mhi_qcom_qdu100_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
+ .event_cfg = mhi_qcom_qdu100_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
+ .name = "qcom-qdu100",
+ .fw = "qcom/qdu100/xbl_s.melf",
+ .edl_trigger = true,
+ .config = &mhi_qcom_qdu100_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .vf_dma_data_width = 40,
+ .sideband_wake = false,
+ .no_m3 = true,
+ .reset_on_remove = true,
+};
+
+static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 2048, 1),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 2048, 2),
+};
+
+static struct mhi_event_config mhi_qcom_sa8775p_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 64),
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -269,6 +351,15 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
+static const struct mhi_controller_config mhi_qcom_sa8775p_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_sa8775p_channels),
+ .ch_cfg = mhi_qcom_sa8775p_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_sa8775p_events),
+ .event_cfg = mhi_qcom_sa8775p_events,
+};
+
static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
@@ -288,10 +379,21 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sa8775p_info = {
+ .name = "qcom-sa8775p",
+ .edl_trigger = false,
+ .config = &mhi_qcom_sa8775p_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
.name = "qcom-sdx75m",
.fw = "qcom/sdx75m/xbl.elf",
.edl = "qcom/sdx75m/edl.mbn",
+ .edl_trigger = true,
.config = &modem_qcom_v2_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -302,6 +404,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.name = "qcom-sdx65m",
.fw = "qcom/sdx65m/xbl.elf",
.edl = "qcom/sdx65m/edl.mbn",
+ .edl_trigger = true,
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -312,6 +415,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.name = "qcom-sdx55m",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
+ .edl_trigger = true,
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -391,6 +495,25 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static const struct mhi_channel_config mhi_foxconn_sdx61_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(50, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(51, "NMEA", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -411,8 +534,29 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.event_cfg = mhi_foxconn_sdx55_events,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
- .name = "foxconn-sdx24",
+static const struct mhi_controller_config modem_foxconn_sdx61_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx61_channels),
+ .ch_cfg = mhi_foxconn_sdx61_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_controller_config modem_foxconn_sdx72_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .ready_timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+ .ch_cfg = mhi_foxconn_sdx55_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+ .name = "foxconn-sdx55",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -420,10 +564,10 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
- .name = "foxconn-sdx55",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
+ .name = "foxconn-t99w175",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -431,8 +575,10 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
- .name = "foxconn-sdx65",
+static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
+ .name = "foxconn-dw5930e",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -440,6 +586,94 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
.sideband_wake = false,
};
+static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
+ .name = "foxconn-t99w368",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
+ .name = "foxconn-t99w373",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
+ .name = "foxconn-t99w510",
+ .edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
+ .name = "foxconn-dw5932e",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w640_info = {
+ .name = "foxconn-t99w640",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx72_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
+ .name = "foxconn-dw5934e",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx72_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w696_info = {
+ .name = "foxconn-t99w696",
+ .edl = "qcom/sdx61/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx61_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w760_info = {
+ .name = "foxconn-t99w760",
+ .edl = "qcom/sdx35/foxconn/xbl_s_devprg_ns.melf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx61_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -520,6 +754,7 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
.config = &modem_sierra_em919x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -598,8 +833,98 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.mru_default = 32768,
};
+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
+ .name = "telit-fe990a",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+static const struct mhi_channel_config mhi_telit_fn920c04_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
+};
+
+static const struct mhi_controller_config modem_telit_fn920c04_config = {
+ .max_channels = 128,
+ .timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn920c04_channels),
+ .ch_cfg = mhi_telit_fn920c04_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn920c04_info = {
+ .name = "telit-fn920c04",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990b40_info = {
+ .name = "telit-fn990b40",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fe990b40_info = {
+ .name = "telit-fe990b40",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
+ .name = "netprisma-lcur57",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
+ .name = "netprisma-fcun69",
+ .edl = "qcom/prog_firehose_sdx6x.elf",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0116),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sa8775p_info },
+ /* Telit FN920C04 (sdx35) */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2020),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn920c04_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
@@ -607,6 +932,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* EM929x (sdx65), use the same configuration as EM919x */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x18d7, 0x0301),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
/* Telit FN980 hardware revision v1 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
@@ -615,13 +943,25 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
- /* Telit FE990 */
+ /* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
- .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
+ /* Foxconn T99W696, all variants */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ /* Telit FN990B40 (sdx72) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x201a),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990b40_info },
+ /* Telit FE990B40 (sdx72) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x2025),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+ /* QDU100, x100-DU */
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -638,40 +978,51 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* DW5930e (sdx55), With eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* T99W175 (sdx55) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* T99W368 (sdx65) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w368_info },
/* T99W373 (sdx62) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w373_info },
/* T99W510 (sdx24), variant 1 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* T99W510 (sdx24), variant 2 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* T99W510 (sdx24), variant 3 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* DW5932e-eSIM (sdx62), With eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
/* DW5932e (sdx62), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
+ /* T99W640 (sdx72) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w640_info },
+	/* DW5934e (sdx72), With eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+	/* DW5934e (sdx72), Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+	/* T99W760 (sdx35) */
+	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe123),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w760_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -686,7 +1037,13 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_mv32_info },
/* T99W175 (sdx55), HP variant */
{ PCI_DEVICE(0x03f0, 0x0a6c),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
+ /* NETPRISMA LCUR57 (SDX24) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info },
+ /* NETPRISMA FCUN69 (SDX6X) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
@@ -702,6 +1059,7 @@ struct mhi_pci_device {
struct work_struct recovery_work;
struct timer_list health_check_timer;
unsigned long status;
+ bool reset_on_remove;
};
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
@@ -757,7 +1115,7 @@ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
u16 vendor = 0;
- if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
@@ -772,22 +1130,18 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int err;
- err = pci_assign_resource(pdev, bar_num);
- if (err)
- return err;
-
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
return err;
}
- err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
- if (err) {
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
+ if (IS_ERR(mhi_cntrl->regs)) {
+ err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
return err;
}
- mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
@@ -814,7 +1168,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
*/
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
@@ -872,7 +1226,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
- del_timer(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
@@ -899,19 +1255,24 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_dbg(&pdev->dev, "Recovery completed\n");
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return;
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
{
- struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_pci_device *mhi_pdev = timer_container_of(mhi_pdev, t,
+ health_check_timer);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -928,12 +1289,47 @@ static void health_check(struct timer_list *t)
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
+static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl)
+{
+ void __iomem *base = mhi_cntrl->regs;
+ void __iomem *edl_db;
+ int ret;
+ u32 val;
+
+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n");
+ return ret;
+ }
+
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+
+ ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
+ if (ret)
+ goto err_get_chdb;
+
+ edl_db = base + val + (8 * MHI_EDL_DB);
+
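+	/*
+	 * Program the 64-bit EDL cookie as two dwords, upper half first;
+	 * the low-dword write is assumed to be what actually rings the
+	 * doorbell, following the usual MHI doorbell convention.
+	 */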
+ mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE));
+ mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE));
+
+ mhi_soc_reset(mhi_cntrl);
+
+err_get_chdb:
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ mhi_device_put(mhi_cntrl->mhi_dev);
+
+ return ret;
+}
+
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
const struct mhi_controller_config *mhi_cntrl_config;
struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
+ unsigned int dma_data_width;
int err;
dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
@@ -944,14 +1340,24 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
- timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
- mhi_cntrl_config = info->config;
+ if (pdev->is_virtfn && info->vf_config)
+ mhi_cntrl_config = info->vf_config;
+ else
+ mhi_cntrl_config = info->config;
+
+ /* Initialize health check monitor only for Physical functions */
+ if (pdev->is_physfn)
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ?
+ info->vf_dma_data_width : info->dma_data_width;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
- mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width);
mhi_cntrl->fw_image = info->fw;
mhi_cntrl->edl_image = info->edl;
@@ -961,6 +1367,13 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
mhi_cntrl->mru = info->mru_default;
+ mhi_cntrl->name = info->name;
+
+ if (pdev->is_physfn)
+ mhi_pdev->reset_on_remove = info->reset_on_remove;
+
+ if (info->edl_trigger)
+ mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
@@ -968,7 +1381,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
- err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width));
if (err)
return err;
@@ -1005,10 +1418,11 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
/* start health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
- /* Only allow runtime-suspend if PME capable (for wakeup) */
- if (pci_pme_capable(pdev, PCI_D3hot)) {
+ /* Allow runtime suspend only if both PME from D3Hot and M3 are supported */
+ if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) {
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
@@ -1030,7 +1444,10 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- del_timer_sync(&mhi_pdev->health_check_timer);
+ pci_disable_sriov(pdev);
+
+ if (pdev->is_physfn)
+ timer_delete_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1042,6 +1459,9 @@ static void mhi_pci_remove(struct pci_dev *pdev)
if (pci_pme_capable(pdev, PCI_D3hot))
pm_runtime_get_noresume(&pdev->dev);
+ if (mhi_pdev->reset_on_remove)
+ mhi_soc_reset(mhi_cntrl);
+
mhi_unregister_controller(mhi_cntrl);
}
@@ -1058,7 +1478,8 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
dev_info(&pdev->dev, "reset\n");
- del_timer(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1103,7 +1524,8 @@ static void mhi_pci_reset_done(struct pci_dev *pdev)
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
@@ -1168,7 +1590,9 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
- del_timer(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -1219,7 +1643,8 @@ static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
}
/* Resume health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* It can be a remote wakeup (no mhi runtime_get), update access time */
pm_runtime_mark_last_busy(dev);
@@ -1305,7 +1730,8 @@ static struct pci_driver mhi_pci_driver = {
.remove = mhi_pci_remove,
.shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
- .driver.pm = &mhi_pci_pm_ops
+ .driver.pm = &mhi_pci_pm_ops,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(mhi_pci_driver);
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 11c0e751f223..b4ef115189b5 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -418,6 +418,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
/* Force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
@@ -602,6 +603,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ bool reset_device = false;
int ret, i;
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
@@ -630,8 +632,25 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
- /* Trigger MHI RESET so that the device will not access host memory */
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (MHI_REG_ACCESS_VALID(prev_state)) {
+ /*
+ * If the device is in PBL or SBL, it will only respond to
+ * RESET if the device is in SYSERR state. SYSERR might
+ * already be cleared at this point.
+ */
+ enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
+ enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);
+
+ if (cur_state == MHI_STATE_SYS_ERR)
+ reset_device = true;
+ else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
+ reset_device = true;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (reset_device) {
u32 in_reset = -1;
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
@@ -813,6 +832,8 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_create_devices(mhi_cntrl);
if (mhi_cntrl->fbc_download)
mhi_download_amss_image(mhi_cntrl);
+
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
@@ -822,6 +843,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = MHI_EE_FP;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_create_devices(mhi_cntrl);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
@@ -1224,6 +1246,8 @@ static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (destroy_device)
mhi_queue_state_transition(mhi_cntrl,
DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
@@ -1263,7 +1287,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state),
msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
@@ -1296,20 +1320,6 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
-void mhi_device_get(struct mhi_device *mhi_dev)
-{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-
- mhi_dev->dev_wake++;
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- mhi_cntrl->wake_get(mhi_cntrl, true);
- read_unlock_bh(&mhi_cntrl->pm_lock);
-}
-EXPORT_SYMBOL_GPL(mhi_device_get);
-
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1336,3 +1346,22 @@ void mhi_device_put(struct mhi_device *mhi_dev)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
+
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ char *buf[2];
+ int ret;
+
+ buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee));
+ buf[1] = NULL;
+
+ if (!buf[0])
+ return;
+
+ ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf);
+ if (ret)
+ dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee));
+
+ kfree(buf[0]);
+}
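The EXEC_ENV uevent emitted by mhi_uevent_notify() can be observed from userspace
via the kernel uevent netlink socket. A minimal listener sketch, assuming the
standard NUL-separated KEY=VALUE uevent payload (illustration only, not part of
this patch):

/* Minimal NETLINK_KOBJECT_UEVENT listener that prints EXEC_ENV changes. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		char *p, *end;

		if (len <= 0)
			break;
		buf[len] = '\0';
		end = buf + len;
		/* uevent payload is a NUL-separated KEY=VALUE list */
		for (p = buf; p < end; p += strlen(p) + 1)
			if (!strncmp(p, "EXEC_ENV=", 9))
				printf("MHI exec env: %s\n", p + 9);
	}
	close(fd);
	return 0;
}

Each execution-environment transition (PBL, SBL, AMSS, EDL, ...) should then
print as it happens.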
diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
index 368515dcb22d..3e0c41777429 100644
--- a/drivers/bus/mhi/host/trace.h
+++ b/drivers/bus/mhi/host/trace.h
@@ -9,6 +9,7 @@
#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENT_MHI_HOST_H
+#include <linux/byteorder/generic.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "../common.h"
@@ -97,18 +98,18 @@ TRACE_EVENT(mhi_gen_tre,
__string(name, mhi_cntrl->mhi_dev->name)
__field(int, ch_num)
__field(void *, wp)
- __field(__le64, tre_ptr)
- __field(__le32, dword0)
- __field(__le32, dword1)
+ __field(uint64_t, tre_ptr)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
),
TP_fast_assign(
- __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __assign_str(name);
__entry->ch_num = mhi_chan->chan;
__entry->wp = mhi_tre;
- __entry->tre_ptr = mhi_tre->ptr;
- __entry->dword0 = mhi_tre->dword[0];
- __entry->dword1 = mhi_tre->dword[1];
+ __entry->tre_ptr = le64_to_cpu(mhi_tre->ptr);
+ __entry->dword0 = le32_to_cpu(mhi_tre->dword[0]);
+ __entry->dword1 = le32_to_cpu(mhi_tre->dword[1]);
),
TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
@@ -131,7 +132,7 @@ TRACE_EVENT(mhi_intvec_states,
),
TP_fast_assign(
- __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __assign_str(name);
__entry->local_ee = mhi_cntrl->ee;
__entry->state = mhi_cntrl->dev_state;
__entry->dev_ee = dev_ee;
@@ -158,7 +159,7 @@ TRACE_EVENT(mhi_tryset_pm_state,
),
TP_fast_assign(
- __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __assign_str(name);
if (pm_state)
pm_state = __fls(pm_state);
__entry->pm_state = pm_state;
@@ -176,19 +177,19 @@ DECLARE_EVENT_CLASS(mhi_process_event_ring,
TP_STRUCT__entry(
__string(name, mhi_cntrl->mhi_dev->name)
- __field(__le32, dword0)
- __field(__le32, dword1)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
__field(int, state)
- __field(__le64, ptr)
+ __field(uint64_t, ptr)
__field(void *, rp)
),
TP_fast_assign(
- __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __assign_str(name);
__entry->rp = rp;
- __entry->ptr = rp->ptr;
- __entry->dword0 = rp->dword[0];
- __entry->dword1 = rp->dword[1];
+ __entry->ptr = le64_to_cpu(rp->ptr);
+ __entry->dword0 = le32_to_cpu(rp->dword[0]);
+ __entry->dword1 = le32_to_cpu(rp->dword[1]);
__entry->state = MHI_TRE_GET_EV_STATE(rp);
),
@@ -226,7 +227,7 @@ DECLARE_EVENT_CLASS(mhi_update_channel_state,
),
TP_fast_assign(
- __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __assign_str(name);
__entry->ch_num = mhi_chan->chan;
__entry->state = state;
__entry->reason = reason;
@@ -265,7 +266,7 @@ TRACE_EVENT(mhi_pm_st_transition,
),
TP_fast_assign(
- __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __assign_str(name);
__entry->state = state;
),
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 8baf14bd5eff..12dd32fd0b62 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -37,7 +37,7 @@
/* Each block of device registers is 64 bytes */
#define CDMM_DRB_SIZE 64
-#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+#define to_mips_cdmm_driver(d) container_of_const(d, struct mips_cdmm_driver, drv)
/* Default physical base address */
static phys_addr_t mips_cdmm_default_base;
@@ -59,10 +59,10 @@ mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
return ret ? table : NULL;
}
-static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+static int mips_cdmm_match(struct device *dev, const struct device_driver *drv)
{
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
- struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+ const struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
}
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 641c1a6adc8a..7ce61d629a87 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -83,10 +83,10 @@ static const struct attribute_group *moxtet_dev_groups[] = {
NULL,
};
-static int moxtet_match(struct device *dev, struct device_driver *drv)
+static int moxtet_match(struct device *dev, const struct device_driver *drv)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
- struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const struct moxtet_driver *tdrv = to_moxtet_driver(drv);
const enum turris_mox_module_id *t;
if (of_driver_match_device(dev, drv))
@@ -484,7 +484,6 @@ static const struct file_operations input_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = input_read,
- .llseek = no_llseek,
};
static ssize_t output_read(struct file *file, char __user *buf, size_t len,
@@ -549,7 +548,6 @@ static const struct file_operations output_fops = {
.open = moxtet_debug_open,
.read = output_read,
.write = output_write,
- .llseek = no_llseek,
};
static int moxtet_register_debugfs(struct moxtet *moxtet)
@@ -659,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
@@ -739,9 +737,8 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
- MOXTET_NIRQS, 0,
- &moxtet_irq_domain, moxtet);
+ moxtet->irq.domain = irq_domain_create_simple(dev_fwnode(moxtet->dev), MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 00cb792bda18..dd94145c9b22 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1006,7 +1006,7 @@ static __init int mvebu_mbus_debugfs_init(void)
}
fs_initcall(mvebu_mbus_debugfs_init);
-static int mvebu_mbus_suspend(void)
+static int mvebu_mbus_suspend(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1040,7 +1040,7 @@ static int mvebu_mbus_suspend(void)
return 0;
}
-static void mvebu_mbus_resume(void)
+static void mvebu_mbus_resume(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1069,9 +1069,13 @@ static void mvebu_mbus_resume(void)
}
}
-static struct syscore_ops mvebu_mbus_syscore_ops = {
- .suspend = mvebu_mbus_suspend,
- .resume = mvebu_mbus_resume,
+static const struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static struct syscore mvebu_mbus_syscore = {
+ .ops = &mvebu_mbus_syscore_ops,
};
static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
@@ -1118,7 +1122,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
writel(UNIT_SYNC_BARRIER_ALL,
mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
- register_syscore_ops(&mvebu_mbus_syscore_ops);
+ register_syscore(&mvebu_mbus_syscore);
return 0;
}
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 7d7479ba0a75..e4dfda7b3b10 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove_new = omap_ocp2scp_remove,
+ .remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
index ee6d29925e4d..7f0a8f8b3f4c 100644
--- a/drivers/bus/omap_l3_smx.c
+++ b/drivers/bus/omap_l3_smx.c
@@ -273,7 +273,7 @@ static void omap3_l3_remove(struct platform_device *pdev)
static struct platform_driver omap3_l3_driver = {
.probe = omap3_l3_probe,
- .remove_new = omap3_l3_remove,
+ .remove = omap3_l3_remove,
.driver = {
.name = "omap_l3_smx",
.of_match_table = of_match_ptr(omap3_l3_match),
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 5931974a21fa..7f5fd4e0940d 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names;
- data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
-
- /* power domains */
- ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
-
- ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
-
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+ goto err_detach_pds_bus;
+ }
+
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
+
+err_detach_pds_bus:
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+
+ return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0);
- iounmap(data->reg_mpm_sscaon_config1);
-
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
@@ -373,7 +377,7 @@ MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
static struct platform_driver qcom_ssc_block_bus_driver = {
.probe = qcom_ssc_block_bus_probe,
- .remove_new = qcom_ssc_block_bus_remove,
+ .remove = qcom_ssc_block_bus_remove,
.driver = {
.name = "qcom-ssc-block-bus",
.of_match_table = qcom_ssc_block_bus_of_match,
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 50870c827889..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
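+	/*
+	 * No drvdata means the device matched as ONLY_BUS and never
+	 * enabled runtime PM in probe (assumption), so forcing
+	 * suspend/resume on it would be wrong.
+	 */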
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
@@ -128,7 +148,7 @@ MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
static struct platform_driver simple_pm_bus_driver = {
.probe = simple_pm_bus_probe,
- .remove_new = simple_pm_bus_remove,
+ .remove = simple_pm_bus_remove,
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
index 4cf1b60014b7..debeaf8ea1bd 100644
--- a/drivers/bus/stm32_rifsc.c
+++ b/drivers/bus/stm32_rifsc.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -25,6 +26,8 @@
#define RIFSC_RISC_PRIVCFGR0 0x30
#define RIFSC_RISC_PER0_CIDCFGR 0x100
#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_REG0_ACFGR 0x900
+#define RIFSC_RISC_REG3_AADDR 0x924
#define RIFSC_RISC_HWCFGR2 0xFEC
/*
@@ -70,6 +73,565 @@
#define RIF_CID0 0x0
#define RIF_CID1 0x1
+#if defined(CONFIG_DEBUG_FS)
+#define RIFSC_RISUP_ENTRIES 128
+#define RIFSC_RIMU_ENTRIES 16
+#define RIFSC_RISAL_SUBREGIONS 2
+#define RIFSC_RISAL_GRANULARITY 8
+
+#define RIFSC_RIMC_ATTR0 0xC10
+
+#define RIFSC_RIMC_CIDSEL BIT(2)
+#define RIFSC_RIMC_MCID_MASK GENMASK(6, 4)
+#define RIFSC_RIMC_MSEC BIT(8)
+#define RIFSC_RIMC_MPRIV BIT(9)
+
+#define RIFSC_RISC_SRCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SRPRIV BIT(9)
+#define RIFSC_RISC_SRSEC BIT(8)
+#define RIFSC_RISC_SRRLOCK BIT(1)
+#define RIFSC_RISC_SREN BIT(0)
+#define RIFSC_RISC_SRLENGTH_MASK GENMASK(27, 16)
+#define RIFSC_RISC_SRSTART_MASK GENMASK(10, 0)
+
+static const char *stm32mp21_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "OTG_HS",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "RESERVED",
+ "DCMIPP",
+ "LTDC_L1/L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+};
+
+static const char *stm32mp25_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "USB3DR",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "PCIE",
+ "GPU",
+	"DCMIPP",
+ "LTDC_L0/L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "VDEC",
+ "VENC"
+};
+
+static const char *stm32mp21_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "RESERVED",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "RESERVED",
+ "RESERVED",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "RESERVED",
+ "RESERVED",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "RESERVED",
+ "FDCAN",
+ "HDP",
+ "ADC1",
+ "ADC2",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "OTG_HS",
+ "DDRPERFM",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "STGEN",
+ "OCTOSPI1",
+ "RESERVED",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "RESERVED",
+ "LTDC_CMN",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RNG1",
+ "RNG2",
+ "PKA",
+ "SAES",
+ "HASH1",
+ "HASH2",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "WWDG1",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "RESERVED",
+ "ICACHE_DCACHE",
+ "LTDC_L1L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "OTFDEC1",
+ "RESERVED",
+ "IAC",
+};
+
+static const char *stm32mp25_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "TIM20",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "SPI7",
+ "SPI8",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "UART8",
+ "UART9",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "I2C4",
+ "I2C5",
+ "I2C6",
+ "I2C7",
+ "I2C8",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "ADF1",
+ "FDCAN",
+ "HDP",
+ "ADC12",
+ "ADC3",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "USB3DR",
+ "COMBOPHY",
+ "PCIE",
+ "UCPD1",
+ "ETHSW_DEIP",
+ "ETHSW_ACM_CF",
+ "ETHSW_ACM_MSGBU",
+ "STGEN",
+ "OCTOSPI1",
+ "OCTOSPI2",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "GPU",
+ "LTDC_CMN",
+ "DSI_CMN",
+ "RESERVED",
+ "RESERVED",
+ "LVDS",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "VDEC",
+ "VENC",
+ "RESERVED",
+ "RNG",
+ "PKA",
+ "SAES",
+ "HASH",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "IWDG5",
+ "WWDG1",
+ "WWDG2",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "OCTOSPIM",
+ "GICV2M",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "I3C4",
+ "ICACHE_DCACHE",
+ "LTDC_L0L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "DSI_TRIG",
+ "DSI_RDFIFO",
+ "RESERVED",
+ "OTFDEC1",
+ "OTFDEC2",
+ "IAC",
+};
+
+struct rifsc_risup_debug_data {
+ char dev_name[15];
+ u8 dev_cid;
+ u8 dev_sem_cids;
+ u8 dev_id;
+ bool dev_cid_filt_en;
+ bool dev_sem_en;
+ bool dev_priv;
+ bool dev_sec;
+};
+
+struct rifsc_rimu_debug_data {
+ char m_name[11];
+ u8 m_cid;
+ bool cidsel;
+ bool m_sec;
+ bool m_priv;
+};
+
+struct rifsc_subreg_debug_data {
+ bool sr_sec;
+ bool sr_priv;
+ u8 sr_cid;
+ bool sr_rlock;
+ bool sr_enable;
+ u16 sr_start;
+ u16 sr_length;
+};
+
+struct stm32_rifsc_resources_names {
+ const char **device_names;
+ const char **initiator_names;
+};
+
+struct rifsc_dbg_private {
+ const struct stm32_rifsc_resources_names *res_names;
+ void __iomem *mmio;
+ unsigned int nb_risup;
+ unsigned int nb_rimu;
+ unsigned int nb_risal;
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp21_res_names = {
+ .device_names = stm32mp21_rifsc_risup_names,
+ .initiator_names = stm32mp21_rifsc_rimu_names,
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp25_res_names = {
+ .device_names = stm32mp25_rifsc_risup_names,
+ .initiator_names = stm32mp25_rifsc_rimu_names,
+};
+
+static void stm32_rifsc_fill_rimu_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_rimu_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 rimc_attr = readl_relaxed(rifsc->mmio + RIFSC_RIMC_ATTR0 + 0x4 * i);
+
+ snprintf(dbg_entry->m_name, sizeof(dbg_entry->m_name), "%s", dbg_names->initiator_names[i]);
+ dbg_entry->m_cid = FIELD_GET(RIFSC_RIMC_MCID_MASK, rimc_attr);
+ dbg_entry->cidsel = rimc_attr & RIFSC_RIMC_CIDSEL;
+ dbg_entry->m_sec = rimc_attr & RIFSC_RIMC_MSEC;
+ dbg_entry->m_priv = rimc_attr & RIFSC_RIMC_MPRIV;
+}
+
+static void stm32_rifsc_fill_dev_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_risup_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 cid_cfgr, sec_cfgr, priv_cfgr;
+ u8 reg_id = i / IDS_PER_RISC_SEC_PRIV_REGS;
+ u8 reg_offset = i % IDS_PER_RISC_SEC_PRIV_REGS;
+
+ cid_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * i);
+ sec_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ priv_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PRIVCFGR0 + 0x4 * reg_id);
+
+ snprintf(dbg_entry->dev_name, sizeof(dbg_entry->dev_name), "%s",
+ dbg_names->device_names[i]);
+ dbg_entry->dev_id = i;
+ dbg_entry->dev_cid_filt_en = cid_cfgr & CIDCFGR_CFEN;
+ dbg_entry->dev_sem_en = cid_cfgr & CIDCFGR_SEMEN;
+ dbg_entry->dev_cid = FIELD_GET(RIFSC_RISC_SCID_MASK, cid_cfgr);
+ dbg_entry->dev_sem_cids = FIELD_GET(RIFSC_RISC_SEMWL_MASK, cid_cfgr);
+ dbg_entry->dev_sec = sec_cfgr & BIT(reg_offset) ? true : false;
+ dbg_entry->dev_priv = priv_cfgr & BIT(reg_offset) ? true : false;
+}
+
+static void stm32_rifsc_fill_subreg_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_subreg_debug_data *dbg_entry, int i,
+ int j)
+{
+ u32 risc_xcfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG0_ACFGR + 0x10 * i + 0x8 * j);
+ u32 risc_xaddr;
+
+ dbg_entry->sr_sec = risc_xcfgr & RIFSC_RISC_SRSEC;
+ dbg_entry->sr_priv = risc_xcfgr & RIFSC_RISC_SRPRIV;
+ dbg_entry->sr_cid = FIELD_GET(RIFSC_RISC_SRCID_MASK, risc_xcfgr);
+ dbg_entry->sr_rlock = risc_xcfgr & RIFSC_RISC_SRRLOCK;
+ dbg_entry->sr_enable = risc_xcfgr & RIFSC_RISC_SREN;
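+	/*
+	 * Only the third instance has a programmable subregion address
+	 * register (RIFSC_RISC_REG3_AADDR); assume the others span the
+	 * whole memory and report a full-range subregion.
+	 */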
+ if (i == 2) {
+ risc_xaddr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG3_AADDR + 0x8 * j);
+ dbg_entry->sr_length = FIELD_GET(RIFSC_RISC_SRLENGTH_MASK, risc_xaddr);
+ dbg_entry->sr_start = FIELD_GET(RIFSC_RISC_SRSTART_MASK, risc_xaddr);
+ } else {
+ dbg_entry->sr_start = 0;
+ dbg_entry->sr_length = U16_MAX;
+ }
+}
+
+static int stm32_rifsc_conf_dump_show(struct seq_file *s, void *data)
+{
+ struct rifsc_dbg_private *rifsc = (struct rifsc_dbg_private *)s->private;
+ int i, j;
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIFSC dump\n");
+ seq_puts(s, "=============================================\n\n");
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISUP dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_printf(s, "\n| %-15s |", "Peripheral name");
+ seq_puts(s, "| Firewall ID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| CID filtering |");
+ seq_puts(s, "| Semaphore mode |");
+ seq_puts(s, "| SCID |");
+ seq_printf(s, "| %7s |\n", "SEMWL");
+
+ for (i = 0; i < RIFSC_RISUP_ENTRIES && i < rifsc->nb_risup; i++) {
+ struct rifsc_risup_debug_data d_dbg_entry;
+
+ stm32_rifsc_fill_dev_dbg_entry(rifsc, &d_dbg_entry, i);
+
+ seq_printf(s, "| %-15s |", d_dbg_entry.dev_name);
+ seq_printf(s, "| %-11d |", d_dbg_entry.dev_id);
+ seq_printf(s, "| %-8s |", d_dbg_entry.dev_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", d_dbg_entry.dev_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| %-13s |", str_enabled_disabled(d_dbg_entry.dev_cid_filt_en));
+ seq_printf(s, "| %-14s |", str_enabled_disabled(d_dbg_entry.dev_sem_en));
+ seq_printf(s, "| %-4d |", d_dbg_entry.dev_cid);
+ seq_printf(s, "| %#-7x |\n", d_dbg_entry.dev_sem_cids);
+ }
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIMU dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| RIMU's name |");
+ seq_puts(s, "| CIDSEL |");
+ seq_puts(s, "| MCID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |\n");
+
+	for (i = 0; i < RIFSC_RIMU_ENTRIES && i < rifsc->nb_rimu; i++) {
+ struct rifsc_rimu_debug_data m_dbg_entry;
+
+ stm32_rifsc_fill_rimu_dbg_entry(rifsc, &m_dbg_entry, i);
+
+ seq_printf(s, "| %-11s |", m_dbg_entry.m_name);
+ seq_printf(s, "| %-6s |", m_dbg_entry.cidsel ? "CIDSEL" : "");
+ seq_printf(s, "| %-4d |", m_dbg_entry.m_cid);
+ seq_printf(s, "| %-8s |", m_dbg_entry.m_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |\n", m_dbg_entry.m_priv ? "PRIV" : "NPRIV");
+ }
+
+ if (rifsc->nb_risal > 0) {
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISAL dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| Memory |");
+ seq_puts(s, "| Subreg. |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| Subreg. CID |");
+ seq_puts(s, "| Resource lock |");
+ seq_puts(s, "| Subreg. enable |");
+ seq_puts(s, "| Subreg. start |");
+ seq_puts(s, "| Subreg. end |\n");
+
+ for (i = 0; i < rifsc->nb_risal; i++) {
+ for (j = 0; j < RIFSC_RISAL_SUBREGIONS; j++) {
+ struct rifsc_subreg_debug_data sr_dbg_entry;
+
+ stm32_rifsc_fill_subreg_dbg_entry(rifsc, &sr_dbg_entry, i, j);
+
+ seq_printf(s, "| LPSRAM%1d |", i + 1);
+ seq_printf(s, "| %1s |", (j == 0) ? "A" : "B");
+ seq_printf(s, "| %-8s |", sr_dbg_entry.sr_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", sr_dbg_entry.sr_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| 0x%-9x |", sr_dbg_entry.sr_cid);
+ seq_printf(s, "| %-13s |",
+ sr_dbg_entry.sr_rlock ? "locked (1)" : "unlocked (0)");
+ seq_printf(s, "| %-14s |",
+ str_enabled_disabled(sr_dbg_entry.sr_enable));
+ seq_printf(s, "| 0x%-11x |", sr_dbg_entry.sr_start);
+ seq_printf(s, "| 0x%-11x |\n", sr_dbg_entry.sr_start +
+ sr_dbg_entry.sr_length - 1);
+ }
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump);
+
+static int stm32_rifsc_register_debugfs(struct stm32_firewall_controller *rifsc_controller,
+ u32 nb_risup, u32 nb_rimu, u32 nb_risal)
+{
+ struct rifsc_dbg_private *rifsc_priv;
+ struct dentry *root = NULL;
+
+ rifsc_priv = devm_kzalloc(rifsc_controller->dev, sizeof(*rifsc_priv), GFP_KERNEL);
+ if (!rifsc_priv)
+ return -ENOMEM;
+
+ rifsc_priv->mmio = rifsc_controller->mmio;
+ rifsc_priv->nb_risup = nb_risup;
+ rifsc_priv->nb_rimu = nb_rimu;
+ rifsc_priv->nb_risal = nb_risal;
+ rifsc_priv->res_names = of_device_get_match_data(rifsc_controller->dev);
+
+ root = debugfs_lookup("stm32_firewall", NULL);
+ if (!root)
+ root = debugfs_create_dir("stm32_firewall", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ debugfs_create_file("rifsc", 0444, root, rifsc_priv, &stm32_rifsc_conf_dump_fops);
+
+ return 0;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
{
return !(readl(addr) & SEMCR_MUTEX);
@@ -207,9 +769,19 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
rifsc_controller->release_access = stm32_rifsc_release_access;
/* Get number of RIFSC entries */
- nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK;
- nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK;
- nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK;
+ nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ /*
+ * On STM32MP21, RIFSC_RISC_HWCFGR2 shows an incorrect number of RISAL (NUM_RISAL is 3
+ * instead of 0). A software workaround is implemented using the st,mem-map property in the
+ * device tree. This property is absent or left empty if there is no RISAL.
+ */
+ if (of_device_is_compatible(np, "st,stm32mp21-rifsc"))
+ nb_risal = 0;
rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
platform_set_drvdata(pdev, rifsc_controller);
@@ -228,12 +800,29 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
return rc;
}
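The FIELD_GET() conversion above is a functional fix, not just style:
`readl(...) & HWCFGR2_CONFx_MASK` leaves the field bits at their original
position, so any field not anchored at bit 0 yields an inflated count. A
standalone sketch of the semantics, using a made-up mask rather than the real
HWCFGR2 layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 8-bit field in bits 15:8 of a config register. */
#define DEMO_CONF_MASK	GENMASK(15, 8)

static inline unsigned int demo_field(u32 reg)
{
	/*
	 * For reg = 0x1000 (field value 16):
	 *   reg & DEMO_CONF_MASK -> 0x1000 (wrong when used as a count)
	 *   FIELD_GET(...)       -> 16     (masked and shifted down)
	 */
	return FIELD_GET(DEMO_CONF_MASK, reg);
}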
+#if defined(CONFIG_DEBUG_FS)
+ rc = stm32_rifsc_register_debugfs(rifsc_controller, nb_risup, nb_rimu, nb_risal);
+ if (rc)
+ return dev_err_probe(rifsc_controller->dev, rc, "Failed creating debugfs entry\n");
+#endif
+
/* Populate all allowed nodes */
return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
static const struct of_device_id stm32_rifsc_of_match[] = {
- { .compatible = "st,stm32mp25-rifsc" },
+ {
+ .compatible = "st,stm32mp25-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp25_res_names,
+#endif
+ },
+ {
+ .compatible = "st,stm32mp21-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp21_res_names,
+#endif
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 3339311ce068..dfe588179aca 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -36,7 +36,7 @@ static const struct of_device_id sun50i_de2_bus_of_match[] = {
static struct platform_driver sun50i_de2_bus_driver = {
.probe = sun50i_de2_bus_probe,
- .remove_new = sun50i_de2_bus_remove,
+ .remove = sun50i_de2_bus_remove,
.driver = {
.name = "sun50i-de2-bus",
.of_match_table = sun50i_de2_bus_of_match,
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1e29ba76615d..82735c58be11 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -130,7 +130,7 @@ struct sunxi_rsb {
/* bus / slave device related functions */
static const struct bus_type sunxi_rsb_bus;
-static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
+static int sunxi_rsb_device_match(struct device *dev, const struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
}
@@ -373,7 +373,6 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
unlock:
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
@@ -417,7 +416,6 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
@@ -457,7 +455,7 @@ static void regmap_sunxi_rsb_free_ctx(void *context)
kfree(ctx);
}
-static struct regmap_bus regmap_sunxi_rsb = {
+static const struct regmap_bus regmap_sunxi_rsb = {
.reg_write = regmap_sunxi_rsb_reg_write,
.reg_read = regmap_sunxi_rsb_reg_read,
.free_context = regmap_sunxi_rsb_free_ctx,
@@ -751,12 +749,10 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
- if (clk_freq > RSB_MAX_FREQ) {
- dev_err(dev,
- "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
- clk_freq);
- return -EINVAL;
- }
+ if (clk_freq > RSB_MAX_FREQ)
+ return dev_err_probe(dev, -EINVAL,
+ "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
+ clk_freq);
rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
if (!rsb)
@@ -774,28 +770,22 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return irq;
rsb->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(rsb->clk)) {
- ret = PTR_ERR(rsb->clk);
- dev_err(dev, "failed to retrieve clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rsb->clk))
+ return dev_err_probe(dev, PTR_ERR(rsb->clk),
+ "failed to retrieve clk\n");
rsb->rstc = devm_reset_control_get(dev, NULL);
- if (IS_ERR(rsb->rstc)) {
- ret = PTR_ERR(rsb->rstc);
- dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rsb->rstc))
+ return dev_err_probe(dev, PTR_ERR(rsb->rstc),
+ "failed to retrieve reset controller\n");
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
- if (ret) {
- dev_err(dev, "can't register interrupt handler irq %d: %d\n",
- irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "can't register interrupt handler irq %d\n", irq);
ret = sunxi_rsb_hw_init(rsb);
if (ret)
@@ -840,7 +830,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
- .remove_new = sunxi_rsb_remove,
+ .remove = sunxi_rsb_remove,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index de80008bff92..90e3b0a10816 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
static struct platform_driver tegra_aconnect_driver = {
.probe = tegra_aconnect_probe,
- .remove_new = tegra_aconnect_remove,
+ .remove = tegra_aconnect_remove,
.driver = {
.name = "tegra-aconnect",
.of_match_table = tegra_aconnect_of_match,
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index f5d6414df9f2..9c09141961d8 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -303,7 +303,7 @@ MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
static struct platform_driver tegra_gmi_driver = {
.probe = tegra_gmi_probe,
- .remove_new = tegra_gmi_remove,
+ .remove = tegra_gmi_remove,
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
index 4969c556e752..1f2cab91e438 100644
--- a/drivers/bus/ti-pwmss.c
+++ b/drivers/bus/ti-pwmss.c
@@ -44,7 +44,7 @@ static struct platform_driver pwmss_driver = {
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
- .remove_new = pwmss_remove,
+ .remove = pwmss_remove,
};
module_platform_driver(pwmss_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 8767e04d6c89..610354ce7f8f 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -48,6 +48,7 @@ enum sysc_soc {
SOC_UNKNOWN,
SOC_2420,
SOC_2430,
+ SOC_AM33,
SOC_3430,
SOC_AM35,
SOC_3630,
@@ -126,7 +127,6 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
* @child_needs_resume: runtime resume needed for child on resume from suspend
- * @disable_on_idle: status flag used for disabling modules with resets
* @idle_work: work structure used to perform delayed idle on a module
* @pre_reset_quirk: module specific pre-reset quirk
* @post_reset_quirk: module specific post-reset quirk
@@ -678,51 +678,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -948,10 +903,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -2037,6 +1988,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+ * Clearing the SYSC_PRUSS_STANDBY_INIT bit - Updates OCP master
+ * port configuration to enable memory access outside of the
+ * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2089,8 +2055,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
@@ -2203,9 +2171,8 @@ static int sysc_reset(struct sysc *ddata)
static int sysc_init_module(struct sysc *ddata)
{
bool rstctrl_deasserted = false;
- int error = 0;
+ int error = sysc_clockdomain_init(ddata);
- error = sysc_clockdomain_init(ddata);
if (error)
return error;
@@ -2291,11 +2258,9 @@ static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
const char *name)
{
struct device_node *np = ddata->dev->of_node;
- struct property *prop;
- const __be32 *p;
u32 val;
- of_property_for_each_u32(np, name, prop, p, val) {
+ of_property_for_each_u32(np, name, val) {
if (val >= SYSC_NR_IDLEMODES) {
dev_err(ddata->dev, "invalid idlemode: %i\n", val);
return -EINVAL;
@@ -2571,14 +2536,12 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child)
{
- const struct property *prop;
- int i, len;
+ int i;
for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
const char *name = sysc_dts_quirks[i].name;
- prop = of_get_property(np, name, &len);
- if (!prop)
+ if (!of_property_present(np, name))
continue;
ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
@@ -2950,6 +2913,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM33*", SOC_AM33),
SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
@@ -3155,10 +3119,15 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ switch (sysc_soc->soc) {
+ case SOC_AM33:
+ case SOC_3430:
+ case SOC_AM35:
error = -ENXIO;
- else
+ break;
+ default:
error = -EBUSY;
+ }
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
@@ -3350,7 +3319,7 @@ MODULE_DEVICE_TABLE(of, sysc_match);
static struct platform_driver sysc_driver = {
.probe = sysc_probe,
- .remove_new = sysc_remove,
+ .remove = sysc_remove,
.driver = {
.name = "ti-sysc",
.of_match_table = sysc_match,
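
As context for the two devicetree-parsing conversions in the ti-sysc hunks above: the reworked of_property_for_each_u32() keeps its cursor state internal, so the struct property and __be32 pointer locals are no longer needed, and of_property_present() replaces open-coded of_get_property() checks when only existence matters. A minimal sketch, assuming a made-up property name:

#include <linux/of.h>

/* Illustrative only: "example,idle-modes" is a hypothetical property. */
static int example_parse(struct device_node *np)
{
	u32 val;

	if (!of_property_present(np, "example,idle-modes"))
		return 0;

	/* The iterator now takes just the node, name, and value cursor. */
	of_property_for_each_u32(np, "example,idle-modes", val) {
		if (val >= 4)	/* reject out-of-range modes */
			return -EINVAL;
	}

	return 0;
}
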
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index baf22a82c47a..2328c48b9b12 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -294,7 +294,7 @@ static int ts_nbus_probe(struct platform_device *pdev)
state.duty_cycle = state.period;
state.enabled = true;
- ret = pwm_apply_state(pwm, &state);
+ ret = pwm_apply_might_sleep(pwm, &state);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to configure PWM\n");
@@ -336,7 +336,7 @@ MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
static struct platform_driver ts_nbus_driver = {
.probe = ts_nbus_probe,
- .remove_new = ts_nbus_remove,
+ .remove = ts_nbus_remove,
.driver = {
.name = "ts_nbus",
.of_match_table = ts_nbus_of_match,
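
The ts-nbus hunk above is part of the tree-wide rename of pwm_apply_state() to pwm_apply_might_sleep(), which makes the sleeping behaviour explicit at the call site. A rough sketch of the renamed API, with illustrative values:

#include <linux/pwm.h>

static int example_pwm_full_duty(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_init_state(pwm, &state);
	state.duty_cycle = state.period;	/* 100% duty cycle */
	state.enabled = true;

	/* May sleep, so call only from process context. */
	return pwm_apply_might_sleep(pwm, &state);
}
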
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index d2c7ada90186..64ee920721ee 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -414,4 +414,5 @@ static struct platform_driver vexpress_syscfg_driver = {
.probe = vexpress_syscfg_probe,
};
module_platform_driver(vexpress_syscfg_driver);
+MODULE_DESCRIPTION("Versatile Express configuration bus");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index 9345ce4976d7..1518449d47b5 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -1,9 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Cache Drivers"
+
+menuconfig CACHEMAINT_FOR_DMA
+ bool "Cache management for noncoherent DMA"
+ depends on RISCV
+ default y
+ help
+ These drivers implement support for noncoherent DMA master devices
+ on platforms that lack the standard CPU interfaces for this.
+
+if CACHEMAINT_FOR_DMA
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
@@ -14,4 +22,35 @@ config SIFIVE_CCACHE
help
Support for the composable cache controller on SiFive platforms.
-endmenu
+config STARFIVE_STARLINK_CACHE
+ bool "StarFive StarLink Cache controller"
+ depends on ARCH_STARFIVE
+ depends on 64BIT
+ select RISCV_DMA_NONCOHERENT
+ select RISCV_NONSTANDARD_CACHE_OPS
+ help
+ Support for the StarLink cache controller IP from StarFive.
+
+endif #CACHEMAINT_FOR_DMA
+
+menuconfig CACHEMAINT_FOR_HOTPLUG
+ bool "Cache management for memory hot plug like operations"
+ depends on GENERIC_CPU_CACHE_MAINTENANCE
+ help
+ These drivers implement cache management for flows where it is necessary
+ to flush data from all host caches.
+
+if CACHEMAINT_FOR_HOTPLUG
+
+config HISI_SOC_HHA
+ tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ help
+ The Hydra Home Agent (HHA) is responsible for cache coherency
+ on the SoC. This driver enables the cache maintenance functions of
+ the HHA.
+
+ This driver can be built as a module. If so, the module will be
+ called hisi_soc_hha.
+
+endif #CACHEMAINT_FOR_HOTPLUG
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 7657cff3bd6c..b3362b15d6c1 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
-obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
+obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
+obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
+
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
diff --git a/drivers/cache/hisi_soc_hha.c b/drivers/cache/hisi_soc_hha.c
new file mode 100644
index 000000000000..25ff0f5ae79b
--- /dev/null
+++ b/drivers/cache/hisi_soc_hha.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ * Yushan Wang <wangyushan12@huawei.com>
+ *
+ * A system typically contains multiple HHAs. Each is responsible for a subset
+ * of the physical addresses in the system, but interleaving can make the
+ * mapping from a particular cache line to the responsible HHA complex. As
+ * such, no filtering is done in the driver; the hardware is responsible
+ * for responding with success even if it was not responsible for any
+ * addresses in the range on which the operation was requested.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cache_coherency.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define HISI_HHA_CTRL 0x5004
+#define HISI_HHA_CTRL_EN BIT(0)
+#define HISI_HHA_CTRL_RANGE BIT(1)
+#define HISI_HHA_CTRL_TYPE GENMASK(3, 2)
+#define HISI_HHA_START_L 0x5008
+#define HISI_HHA_START_H 0x500c
+#define HISI_HHA_LEN_L 0x5010
+#define HISI_HHA_LEN_H 0x5014
+
+/* The maintenance operation is performed at a 128-byte granularity */
+#define HISI_HHA_MAINT_ALIGN 128
+
+#define HISI_HHA_POLL_GAP_US 10
+#define HISI_HHA_POLL_TIMEOUT_US 50000
+
+struct hisi_soc_hha {
+ /* Must be first element */
+ struct cache_coherency_ops_inst cci;
+ /* Locks HHA instance to forbid overlapping access. */
+ struct mutex lock;
+ void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+ !(val & HISI_HHA_CTRL_EN),
+ HISI_HHA_POLL_GAP_US,
+ HISI_HHA_POLL_TIMEOUT_US);
+}
+
+static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+ phys_addr_t top, addr = invp->addr;
+ size_t size = invp->size;
+ u32 reg;
+
+ if (!size)
+ return -EINVAL;
+
+ addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
+ top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
+ size = top - addr;
+
+ guard(mutex)(&soc_hha->lock);
+
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -EBUSY;
+
+ /*
+ * Hardware will search for addresses in the range [addr, addr + size - 1],
+ * last byte included, and perform maintenance in 128 byte granules
+ * on those cachelines which contain the addresses. If a given instance
+ * is either not responsible for a cacheline or that cacheline is not
+ * currently present then the search will fail, no operation will be
+ * necessary and the device will report success.
+ */
+ size -= 1;
+
+ writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+ writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+ writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+ writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
+ reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+ reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+ writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+ return 0;
+}
+
+static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+
+ guard(mutex)(&soc_hha->lock);
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct cache_coherency_ops hha_ops = {
+ .wbinv = hisi_soc_hha_wbinv,
+ .done = hisi_soc_hha_done,
+};
+
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha;
+ struct resource *mem;
+ int ret;
+
+ soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
+ struct hisi_soc_hha, cci);
+ if (!soc_hha)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, soc_hha);
+
+ mutex_init(&soc_hha->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_free_cci;
+ }
+
+ soc_hha->base = ioremap(mem->start, resource_size(mem));
+ if (!soc_hha->base) {
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to remap io memory");
+ goto err_free_cci;
+ }
+
+ ret = cache_coherency_ops_instance_register(&soc_hha->cci);
+ if (ret)
+ goto err_iounmap;
+
+ return 0;
+
+err_iounmap:
+ iounmap(soc_hha->base);
+err_free_cci:
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+ return ret;
+}
+
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+ cache_coherency_ops_instance_unregister(&soc_hha->cci);
+ iounmap(soc_hha->base);
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+}
+
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+ { "HISI0511", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+ .driver = {
+ .name = "hisi_soc_hha",
+ .acpi_match_table = hisi_soc_hha_ids,
+ },
+ .probe = hisi_soc_hha_probe,
+ .remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 6874b72ec59d..a86800b123b9 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -118,6 +118,8 @@ static void ccache_config_read(void)
}
static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "eswin,eic7700-l3-cache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) },
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
{ .compatible = "starfive,jh7100-ccache",
@@ -149,16 +151,16 @@ static void ccache_flush_range(phys_addr_t start, size_t len)
if (!len)
return;
- mb();
+ mb(); /* complete earlier memory accesses before the cache flush */
for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
- writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+ writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
- writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+ writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
- mb();
}
+ mb(); /* issue later memory accesses after the cache flush */
}
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
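
The ccache_flush_range() change above replaces a barrier per loop iteration with relaxed MMIO writes bracketed by a single mb() on each side. A rough sketch of the pattern, assuming a hypothetical register layout:

#include <linux/io.h>

#define EX_FLUSH64	0x200	/* hypothetical flush-address register */

static void example_flush_lines(void __iomem *base, u64 start, u64 end,
				unsigned int line_size)
{
	u64 line;

	mb();	/* order prior memory accesses before the flush */
	for (line = start; line < end; line += line_size)
		writeq_relaxed(line, base + EX_FLUSH64);
	mb();	/* order the flush before later memory accesses */
}
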
diff --git a/drivers/cache/starfive_starlink_cache.c b/drivers/cache/starfive_starlink_cache.c
new file mode 100644
index 000000000000..24c7d078ca22
--- /dev/null
+++ b/drivers/cache/starfive_starlink_cache.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cache Management Operations for StarFive's Starlink cache controller
+ *
+ * Copyright (C) 2024 Shanghai StarFive Technology Co., Ltd.
+ *
+ * Author: Joshua Yeong <joshua.yeong@starfivetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cacheflush.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+
+#include <asm/dma-noncoherent.h>
+
+#define STARLINK_CACHE_FLUSH_START_ADDR 0x0
+#define STARLINK_CACHE_FLUSH_END_ADDR 0x8
+#define STARLINK_CACHE_FLUSH_CTL 0x10
+#define STARLINK_CACHE_ALIGN 0x40
+
+#define STARLINK_CACHE_ADDRESS_RANGE_MASK GENMASK(39, 0)
+#define STARLINK_CACHE_FLUSH_CTL_MODE_MASK GENMASK(2, 1)
+#define STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK BIT(0)
+
+#define STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE 0
+#define STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE 1
+#define STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED 2
+#define STARLINK_CACHE_FLUSH_POLL_DELAY_US 1
+#define STARLINK_CACHE_FLUSH_TIMEOUT_US 5000000
+
+static void __iomem *starlink_cache_base;
+
+static void starlink_cache_flush_complete(void)
+{
+ volatile void __iomem *ctl = starlink_cache_base + STARLINK_CACHE_FLUSH_CTL;
+ u64 v;
+ int ret;
+
+ ret = readq_poll_timeout_atomic(ctl, v, !(v & STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK),
+ STARLINK_CACHE_FLUSH_POLL_DELAY_US,
+ STARLINK_CACHE_FLUSH_TIMEOUT_US);
+ if (ret)
+ WARN(1, "StarFive Starlink cache flush operation timeout\n");
+}
+
+static void starlink_cache_dma_cache_wback(phys_addr_t paddr, unsigned long size)
+{
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);
+
+ mb();
+ writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK,
+ STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);
+
+ starlink_cache_flush_complete();
+}
+
+static void starlink_cache_dma_cache_invalidate(phys_addr_t paddr, unsigned long size)
+{
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);
+
+ mb();
+ writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK,
+ STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);
+
+ starlink_cache_flush_complete();
+}
+
+static void starlink_cache_dma_cache_wback_inv(phys_addr_t paddr, unsigned long size)
+{
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);
+
+ mb();
+ writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK,
+ STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);
+
+ starlink_cache_flush_complete();
+}
+
+static const struct riscv_nonstd_cache_ops starlink_cache_ops = {
+ .wback = &starlink_cache_dma_cache_wback,
+ .inv = &starlink_cache_dma_cache_invalidate,
+ .wback_inv = &starlink_cache_dma_cache_wback_inv,
+};
+
+static const struct of_device_id starlink_cache_ids[] = {
+ { .compatible = "starfive,jh8100-starlink-cache" },
+ { /* sentinel */ }
+};
+
+static int __init starlink_cache_init(void)
+{
+ struct device_node *np;
+ u32 block_size;
+ int ret;
+
+ np = of_find_matching_node(NULL, starlink_cache_ids);
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_property_read_u32(np, "cache-block-size", &block_size);
+ if (ret)
+ return ret;
+
+ if (block_size % STARLINK_CACHE_ALIGN)
+ return -EINVAL;
+
+ starlink_cache_base = of_iomap(np, 0);
+ if (!starlink_cache_base)
+ return -ENOMEM;
+
+ riscv_cbom_block_size = block_size;
+ riscv_noncoherent_supported();
+ riscv_noncoherent_register_cache_ops(&starlink_cache_ops);
+
+ return 0;
+}
+arch_initcall(starlink_cache_init);
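
The StarLink driver above encodes its control register entirely with the FIELD_PREP()/GENMASK() idiom: masks name each bit range once, and FIELD_PREP() shifts values into place without hand-written shift constants. A minimal sketch with made-up fields:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_ADDR_MASK	GENMASK_ULL(39, 0)	/* 40-bit address field */
#define EX_MODE_MASK	GENMASK_ULL(2, 1)
#define EX_ENABLE	BIT_ULL(0)

static u64 example_encode_ctl(u64 mode)
{
	/* FIELD_PREP() masks and shifts the value into bits [2:1]. */
	return FIELD_PREP(EX_MODE_MASK, mode) | EX_ENABLE;
}
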
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a5e07270e0d4..31ba1f8c1f78 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -624,9 +624,6 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
if (check_media_type == 1)
cdi->options |= (int) CDO_CHECK_TYPE;
- if (CDROM_CAN(CDC_MRW_W))
- cdi->exit = cdrom_mrw_exit;
-
if (cdi->ops->read_cdda_bpc)
cdi->cdda_method = CDDA_BPC_FULL;
else
@@ -651,9 +648,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
- if (cdi->exit)
- cdi->exit(cdi);
-
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
EXPORT_SYMBOL(unregister_cdrom);
@@ -1106,7 +1100,7 @@ int open_for_data(struct cdrom_device_info *cdi)
}
}
- cd_dbg(CD_OPEN, "all seems well, opening the devicen");
+ cd_dbg(CD_OPEN, "all seems well, opening the device\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
@@ -1264,6 +1258,8 @@ void cdrom_release(struct cdrom_device_info *cdi)
cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n",
cdi->name);
cdrom_dvd_rw_close_write(cdi);
+ if (CDROM_CAN(CDC_MRW_W))
+ cdrom_mrw_exit(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cd_dbg(CD_CLOSE, "Unlocking door!\n");
@@ -2313,7 +2309,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
return -EINVAL;
/* Prevent arg from speculatively bypassing the length check */
- barrier_nospec();
+ arg = array_index_nospec(arg, cdi->capacity);
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
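
The change above swaps a bare barrier_nospec() for array_index_nospec(), which clamps the already-validated index under speculative execution so a Spectre-v1 gadget cannot read out of bounds. A minimal sketch of the idiom, with illustrative names:

#include <linux/nospec.h>

static int example_lookup(const int *table, unsigned long nr,
			  unsigned long idx)
{
	if (idx >= nr)
		return -EINVAL;

	/* Clamp idx for the speculative path as well. */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}
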
@@ -2358,7 +2354,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
return -EFAULT;
tmp_info.media_flags = 0;
- if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
+ if (cdi->last_media_change_ms > tmp_info.last_media_change)
tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
tmp_info.last_media_change = cdi->last_media_change_ms;
@@ -3473,7 +3469,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
return 0;
}
-static int cdrom_sysctl_info(struct ctl_table *ctl, int write,
+static int cdrom_sysctl_info(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
@@ -3586,7 +3582,7 @@ static void cdrom_update_settings(void)
mutex_unlock(&cdrom_mutex);
}
-static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
+static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -3612,7 +3608,7 @@ static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
}
/* Place files in /proc/sys/dev/cdrom */
-static struct ctl_table cdrom_table[] = {
+static const struct ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
@@ -3677,8 +3673,7 @@ static void cdrom_sysctl_register(void)
static void cdrom_sysctl_unregister(void)
{
- if (cdrom_sysctl_header)
- unregister_sysctl_table(cdrom_sysctl_header);
+ unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
@@ -3708,4 +3703,5 @@ static void __exit cdrom_exit(void)
module_init(cdrom_init);
module_exit(cdrom_exit);
+MODULE_DESCRIPTION("Uniform CD-ROM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index eefdd422ad8e..85aceab5eac6 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -744,6 +744,7 @@ static int probe_gdrom(struct platform_device *devptr)
.max_segments = 1,
/* set a large max size to get most from DMA */
.max_segment_size = 0x40000,
+ .features = BLK_FEAT_ROTATIONAL,
};
int err;
@@ -776,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_gdrom_setupcd();
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (err)
goto probe_fail_free_cd_info;
@@ -846,7 +847,7 @@ static void remove_gdrom(struct platform_device *devptr)
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove_new = remove_gdrom,
+ .remove = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
index a08958485e31..1f1e360507d7 100644
--- a/drivers/cdx/Kconfig
+++ b/drivers/cdx/Kconfig
@@ -7,7 +7,7 @@
config CDX_BUS
bool "CDX Bus driver"
- depends on OF && ARM64
+ depends on OF && ARM64 || COMPILE_TEST
help
Driver to enable Composable DMA Transfer(CDX) Bus. CDX bus
exposes Fabric devices which uses composable DMA IP to the
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
index 749a3295c2bd..3ca7068a3052 100644
--- a/drivers/cdx/Makefile
+++ b/drivers/cdx/Makefile
@@ -5,7 +5,7 @@
# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
#
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"'
obj-$(CONFIG_CDX_BUS) += cdx.o controller/
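
This quoting change, together with the EXPORT_SYMBOL_NS_GPL() and MODULE_IMPORT_NS() conversions later in this series, moves symbol namespaces from bare preprocessor identifiers to string literals. A minimal sketch with a hypothetical namespace:

#include <linux/export.h>
#include <linux/module.h>

int example_ns_func(void)
{
	return 0;
}
/* Namespaces are now quoted strings rather than bare tokens. */
EXPORT_SYMBOL_NS_GPL(example_ns_func, "EXAMPLE_NS");

/* A consumer module then declares: */
MODULE_IMPORT_NS("EXAMPLE_NS");
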
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 236d381dc5f7..b39af2f1937f 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -170,7 +170,7 @@ static int cdx_unregister_device(struct device *dev,
return 0;
}
-static void cdx_unregister_devices(struct bus_type *bus)
+static void cdx_unregister_devices(const struct bus_type *bus)
{
/* Reset all the devices attached to cdx bus */
bus_for_each_dev(bus, NULL, NULL, cdx_unregister_device);
@@ -262,10 +262,10 @@ EXPORT_SYMBOL_GPL(cdx_clear_master);
*
* Return: true on success, false otherwise.
*/
-static int cdx_bus_match(struct device *dev, struct device_driver *drv)
+static int cdx_bus_match(struct device *dev, const struct device_driver *drv)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
- struct cdx_driver *cdx_drv = to_cdx_driver(drv);
+ const struct cdx_driver *cdx_drv = to_cdx_driver(drv);
const struct cdx_device_id *found_id = NULL;
const struct cdx_device_id *ids;
@@ -310,7 +310,7 @@ static int cdx_probe(struct device *dev)
* Setup MSI device data so that generic MSI alloc/free can
* be used by the device driver.
*/
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
error = msi_setup_device_data(&cdx_dev->dev);
if (error)
return error;
@@ -338,7 +338,10 @@ static void cdx_shutdown(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ if (cdx_dev->is_bus && cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
if (cdx_drv && cdx_drv->shutdown)
cdx_drv->shutdown(cdx_dev);
}
@@ -357,7 +360,8 @@ static int cdx_dma_configure(struct device *dev)
return ret;
}
- if (!ret && !cdx_drv->driver_managed_dma) {
+ /* @cdx_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !cdx_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -470,8 +474,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
- return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
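
The hunk above closes a race: driver_override can be freed and reassigned by the store() path, so the show() path must snapshot it under device_lock(). A sketch of the pattern, with an illustrative structure:

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_dev {
	struct device dev;
	const char *driver_override;
};

static ssize_t example_override_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct example_dev *edev = container_of(dev, struct example_dev, dev);
	ssize_t len;

	device_lock(dev);	/* serialize against the store() path */
	len = sysfs_emit(buf, "%s\n", edev->driver_override);
	device_unlock(dev);
	return len;
}
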
@@ -643,7 +651,7 @@ static struct attribute *cdx_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(cdx_bus);
-struct bus_type cdx_bus_type = {
+const struct bus_type cdx_bus_type = {
.name = "cdx",
.match = cdx_bus_match,
.probe = cdx_probe,
@@ -707,7 +715,7 @@ static const struct vm_operations_struct cdx_phys_vm_ops = {
* Return: true on success, false otherwise.
*/
static int cdx_mmap_resource(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj));
@@ -825,7 +833,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
cdx_dev->num_msi = dev_params->num_msi;
dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
}
@@ -868,7 +876,7 @@ fail:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cdx_device_add, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_device_add, "CDX_BUS_CONTROLLER");
struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num)
{
@@ -915,7 +923,7 @@ device_add_fail:
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(cdx_bus_add, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_bus_add, "CDX_BUS_CONTROLLER");
int cdx_register_controller(struct cdx_controller *cdx)
{
@@ -940,7 +948,7 @@ int cdx_register_controller(struct cdx_controller *cdx)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cdx_register_controller, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_register_controller, "CDX_BUS_CONTROLLER");
void cdx_unregister_controller(struct cdx_controller *cdx)
{
@@ -955,7 +963,7 @@ void cdx_unregister_controller(struct cdx_controller *cdx)
mutex_unlock(&cdx_controller_lock);
}
-EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, "CDX_BUS_CONTROLLER");
static int __init cdx_bus_init(void)
{
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index e55f1716cfcb..91b95422b263 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -165,7 +165,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
struct device_node *parent_node;
struct irq_domain *parent;
- fwnode_handle = of_node_to_fwnode(np);
+ fwnode_handle = of_fwnode_handle(np);
parent_node = of_parse_phandle(np, "msi-map", 1);
if (!parent_node) {
@@ -173,7 +173,8 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return NULL;
}
- parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
+ parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
@@ -189,4 +190,4 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return cdx_msi_domain;
}
-EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, "CDX_BUS_CONTROLLER");
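
The cdx_msi.c hunk above also plugs a node-reference leak: of_parse_phandle() returns its node with an elevated refcount, which the caller must drop with of_node_put(). A minimal sketch, assuming a hypothetical property name:

#include <linux/of.h>

static int example_use_phandle(struct device_node *np)
{
	struct device_node *target;

	target = of_parse_phandle(np, "example-phandle", 0);
	if (!target)
		return -ENODEV;

	/* ... look something up via target ... */

	of_node_put(target);	/* balance the get from of_parse_phandle() */
	return 0;
}
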
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index f8e729761aee..a480b62cbd1f 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -9,7 +9,7 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
- select GENERIC_MSI_IRQ
+ depends on HAS_DMA
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
deleted file mode 100644
index 567f8ec47582..000000000000
--- a/drivers/cdx/controller/bitfield.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_BITFIELD_H
-#define CDX_BITFIELD_H
-
-#include <linux/bitfield.h>
-
-/* Lowest bit numbers and widths */
-#define CDX_DWORD_LBN 0
-#define CDX_DWORD_WIDTH 32
-
-/* Specified attribute (e.g. LBN) of the specified field */
-#define CDX_VAL(field, attribute) field ## _ ## attribute
-/* Low bit number of the specified field */
-#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
-/* Bit width of the specified field */
-#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
-/* High bit number of the specified field */
-#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
-
-/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
-struct cdx_dword {
- __le32 cdx_u32;
-};
-
-/* Value expanders for printk */
-#define CDX_DWORD_VAL(dword) \
- ((unsigned int)le32_to_cpu((dword).cdx_u32))
-
-/*
- * Extract bit field portion [low,high) from the 32-bit little-endian
- * element which contains bits [min,max)
- */
-#define CDX_DWORD_FIELD(dword, field) \
- (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
- le32_to_cpu((dword).cdx_u32)))
-
-/*
- * Creates the portion of the named bit field that lies within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELD(field, value) \
- (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
- CDX_LOW_BIT(field)), value))
-
-/*
- * Creates the portion of the named bit fields that lie within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELDS(field1, value1, \
- field2, value2, \
- field3, value3, \
- field4, value4, \
- field5, value5, \
- field6, value6, \
- field7, value7) \
- (CDX_INSERT_FIELD(field1, (value1)) | \
- CDX_INSERT_FIELD(field2, (value2)) | \
- CDX_INSERT_FIELD(field3, (value3)) | \
- CDX_INSERT_FIELD(field4, (value4)) | \
- CDX_INSERT_FIELD(field5, (value5)) | \
- CDX_INSERT_FIELD(field6, (value6)) | \
- CDX_INSERT_FIELD(field7, (value7)))
-
-#define CDX_POPULATE_DWORD(dword, ...) \
- (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
-
-/* Populate a dword field with various numbers of arguments */
-#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
-#define CDX_POPULATE_DWORD_6(dword, ...) \
- CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_5(dword, ...) \
- CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_4(dword, ...) \
- CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_3(dword, ...) \
- CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_2(dword, ...) \
- CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_1(dword, ...) \
- CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_SET_DWORD(dword) \
- CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
-
-#endif /* CDX_BITFIELD_H */
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index 112a1541de6d..280f207735da 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -14,7 +14,7 @@
#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
@@ -193,21 +193,19 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->ops = &cdx_ops;
/* Create MSI domain */
- cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
if (!cdx->msi_domain) {
- dev_err(&pdev->dev, "cdx_msi_domain_init() failed");
- ret = -ENODEV;
+ ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
goto cdx_msi_fail;
}
ret = cdx_setup_rpmsg(pdev);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to register CDX RPMsg transport\n");
+ dev_err_probe(&pdev->dev, ret, "Failed to register CDX RPMsg transport\n");
goto cdx_rpmsg_fail;
}
- dev_info(&pdev->dev, "Successfully registered CDX controller with RPMsg as transport\n");
return 0;
cdx_rpmsg_fail:
@@ -222,7 +220,7 @@ mcdi_init_fail:
return ret;
}
-static int xlnx_cdx_remove(struct platform_device *pdev)
+static void xlnx_cdx_remove(struct platform_device *pdev)
{
struct cdx_controller *cdx = platform_get_drvdata(pdev);
struct cdx_mcdi *cdx_mcdi = cdx->priv;
@@ -234,8 +232,6 @@ static int xlnx_cdx_remove(struct platform_device *pdev)
cdx_mcdi_finish(cdx_mcdi);
kfree(cdx_mcdi);
-
- return 0;
}
static const struct of_device_id cdx_match_table[] = {
@@ -248,33 +244,15 @@ MODULE_DEVICE_TABLE(of, cdx_match_table);
static struct platform_driver cdx_pdriver = {
.driver = {
.name = "cdx-controller",
- .pm = NULL,
.of_match_table = cdx_match_table,
},
.probe = xlnx_cdx_probe,
.remove = xlnx_cdx_remove,
};
-static int __init cdx_controller_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&cdx_pdriver);
- if (ret)
- pr_err("platform_driver_register() failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit cdx_controller_exit(void)
-{
- platform_driver_unregister(&cdx_pdriver);
-}
-
-module_init(cdx_controller_init);
-module_exit(cdx_controller_exit);
+module_platform_driver(cdx_pdriver);
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("CDX controller for AMD devices");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CDX_BUS_CONTROLLER);
+MODULE_IMPORT_NS("CDX_BUS_CONTROLLER");
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 04b578a0be17..59aabd99fa8f 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -15,7 +15,7 @@
#include "../cdx.h"
#include "cdx_controller.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
{ .name = "mcdi_ipc" },
@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = rpdev->dst;
- strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
- strlen(cdx_rpmsg_id_table[0].name));
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
if (!cdx_mcdi->ept) {
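
The strscpy() fix above corrects the size argument: it must be the destination capacity, not strlen() of the source, since strscpy() copies at most size - 1 characters before terminating. A minimal sketch, with a hypothetical buffer size:

#include <linux/string.h>

#define EX_NAME_LEN 32

static void example_copy_name(char dst[EX_NAME_LEN], const char *src)
{
	/* Bound by the destination buffer, not the source length. */
	strscpy(dst, src, EX_NAME_LEN);
}
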
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index 1eedc5eeb315..2e82ffc18d89 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -23,13 +23,10 @@
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
-#include "bitfield.h"
-#include "mcdi.h"
-
-struct cdx_mcdi_copy_buffer {
- struct cdx_dword buffer[DIV_ROUND_UP(MCDI_CTL_SDU_LEN_MAX, 4)];
-};
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
@@ -103,6 +100,19 @@ static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd
return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
}
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
int cdx_mcdi_init(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -132,7 +142,16 @@ fail2:
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+/**
+ * cdx_mcdi_finish - Cleanup MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function cleans up the MCDI resources associated with a cdx_mcdi
+ * structure and destroys the MCDI workqueue.
+ */
void cdx_mcdi_finish(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -147,6 +166,7 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
kfree(cdx->mcdi);
cdx->mcdi = NULL;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
{
@@ -557,6 +577,19 @@ static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
}
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
{
struct cdx_mcdi_iface *mcdi;
@@ -594,6 +627,7 @@ void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int le
cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
static void cdx_mcdi_cmd_work(struct work_struct *context)
{
@@ -761,6 +795,7 @@ int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, false);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
/**
* cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
deleted file mode 100644
index 54a65e9760ae..000000000000
--- a/drivers/cdx/controller/mcdi.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2008-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_MCDI_H
-#define CDX_MCDI_H
-
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/rpmsg.h>
-
-#include "bitfield.h"
-#include "mc_cdx_pcol.h"
-
-#ifdef DEBUG
-#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
-#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
-#else
-#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
-#define CDX_WARN_ON_PARANOID(x) do {} while (0)
-#endif
-
-/**
- * enum cdx_mcdi_mode - MCDI transaction mode
- * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
- * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
- */
-enum cdx_mcdi_mode {
- MCDI_MODE_EVENTS,
- MCDI_MODE_FAIL,
-};
-
-#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_RPC_LONG_TIMEOU (60 * HZ)
-#define MCDI_RPC_POST_RST_TIME (10 * HZ)
-
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
-
-/**
- * enum cdx_mcdi_cmd_state - State for an individual MCDI command
- * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
- * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
- * as MC have too many outstanding commands. Command will be retried once
- * another command returns.
- * @MCDI_STATE_RUNNING: Command was accepted and is running.
- * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
- * the command.
- * @MCDI_STATE_FINISHED: Processing of this command has completed.
- */
-
-enum cdx_mcdi_cmd_state {
- MCDI_STATE_QUEUED,
- MCDI_STATE_RETRY,
- MCDI_STATE_RUNNING,
- MCDI_STATE_RUNNING_CANCELLED,
- MCDI_STATE_FINISHED,
-};
-
-/**
- * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
- * with CDX controller.
- * @mcdi: MCDI interface
- * @mcdi_ops: MCDI operations
- * @r5_rproc : R5 Remoteproc device handle
- * @rpdev: RPMsg device
- * @ept: RPMsg endpoint
- * @work: Post probe work
- */
-struct cdx_mcdi {
- /* MCDI interface */
- struct cdx_mcdi_data *mcdi;
- const struct cdx_mcdi_ops *mcdi_ops;
-
- struct rproc *r5_rproc;
- struct rpmsg_device *rpdev;
- struct rpmsg_endpoint *ept;
- struct work_struct work;
-};
-
-struct cdx_mcdi_ops {
- void (*mcdi_request)(struct cdx_mcdi *cdx,
- const struct cdx_dword *hdr, size_t hdr_len,
- const struct cdx_dword *sdu, size_t sdu_len);
- unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
-};
-
-typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
- unsigned long cookie, int rc,
- struct cdx_dword *outbuf,
- size_t outlen_actual);
-
-/**
- * struct cdx_mcdi_cmd - An outstanding MCDI command
- * @ref: Reference count. There will be one reference if the command is
- * in the mcdi_iface cmd_list, another if it's on a cleanup list,
- * and a third if it's queued in the work queue.
- * @list: The data for this entry in mcdi->cmd_list
- * @cleanup_list: The data for this entry in a cleanup list
- * @work: The work item for this command, queued in mcdi->workqueue
- * @mcdi: The mcdi_iface for this command
- * @state: The state of this command
- * @inlen: inbuf length
- * @inbuf: Input buffer
- * @quiet: Whether to silence errors
- * @reboot_seen: Whether a reboot has been seen during this command,
- * to prevent duplicates
- * @seq: Sequence number
- * @started: Jiffies this command was started at
- * @cookie: Context for completion function
- * @completer: Completion function
- * @handle: Command handle
- * @cmd: Command number
- * @rc: Return code
- * @outlen: Length of output buffer
- * @outbuf: Output buffer
- */
-struct cdx_mcdi_cmd {
- struct kref ref;
- struct list_head list;
- struct list_head cleanup_list;
- struct work_struct work;
- struct cdx_mcdi_iface *mcdi;
- enum cdx_mcdi_cmd_state state;
- size_t inlen;
- const struct cdx_dword *inbuf;
- bool quiet;
- bool reboot_seen;
- u8 seq;
- unsigned long started;
- unsigned long cookie;
- cdx_mcdi_async_completer *completer;
- unsigned int handle;
- unsigned int cmd;
- int rc;
- size_t outlen;
- struct cdx_dword *outbuf;
- /* followed by inbuf data if necessary */
-};
-
-/**
- * struct cdx_mcdi_iface - MCDI protocol context
- * @cdx: The associated NIC
- * @iface_lock: Serialise access to this structure
- * @outstanding_cleanups: Count of cleanups
- * @cmd_list: List of outstanding and running commands
- * @workqueue: Workqueue used for delayed processing
- * @cmd_complete_wq: Waitqueue for command completion
- * @db_held_by: Command the MC doorbell is in use by
- * @seq_held_by: Command each sequence number is in use by
- * @prev_handle: The last used command handle
- * @mode: Poll for mcdi completion, or wait for an mcdi_event
- * @prev_seq: The last used sequence number
- * @new_epoch: Indicates start of day or start of MC reboot recovery
- */
-struct cdx_mcdi_iface {
- struct cdx_mcdi *cdx;
- /* Serialise access */
- struct mutex iface_lock;
- unsigned int outstanding_cleanups;
- struct list_head cmd_list;
- struct workqueue_struct *workqueue;
- wait_queue_head_t cmd_complete_wq;
- struct cdx_mcdi_cmd *db_held_by;
- struct cdx_mcdi_cmd *seq_held_by[16];
- unsigned int prev_handle;
- enum cdx_mcdi_mode mode;
- u8 prev_seq;
- bool new_epoch;
-};
-
-/**
- * struct cdx_mcdi_data - extra state for NICs that implement MCDI
- * @iface: Interface/protocol state
- * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
- */
-struct cdx_mcdi_data {
- struct cdx_mcdi_iface iface;
- u32 fn_flags;
-};
-
-static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
-{
- return cdx->mcdi ? &cdx->mcdi->iface : NULL;
-}
-
-int cdx_mcdi_init(struct cdx_mcdi *cdx);
-void cdx_mcdi_finish(struct cdx_mcdi *cdx);
-
-void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
-int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
-int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- cdx_mcdi_async_completer *complete,
- unsigned long cookie);
-int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
- unsigned int timeout_jiffies);
-
-/*
- * We expect that 16- and 32-bit fields in MCDI requests and responses
- * are appropriately aligned, but 64-bit fields are only
- * 32-bit-aligned.
- */
-#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
-#define _MCDI_PTR(_buf, _offset) \
- ((u8 *)(_buf) + (_offset))
-#define MCDI_PTR(_buf, _field) \
- _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
-#define _MCDI_CHECK_ALIGN(_ofst, _align) \
- ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
- (_ofst))
-#define _MCDI_DWORD(_buf, _field) \
- ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
-
-#define MCDI_BYTE(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
- *MCDI_PTR(_buf, _field))
-#define MCDI_WORD(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
- le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
-#define MCDI_SET_DWORD(_buf, _field, _value) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
-#define MCDI_DWORD(_buf, _field) \
- CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
-#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
- MC_CMD_ ## _name1, _value1)
-#define MCDI_SET_QWORD(_buf, _field, _value) \
- do { \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
- CDX_DWORD, (u32)(_value)); \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
- CDX_DWORD, (u64)(_value) >> 32); \
- } while (0)
-#define MCDI_QWORD(_buf, _field) \
- (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
- (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
-
-#endif /* CDX_MCDI_H */
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index 885c69e6ebe5..8ae2d99be81e 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
-#include "mcdi.h"
#include "mcdi_functions.h"
int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index b9942affdc6b..57fd1bae706b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -8,7 +8,8 @@
#ifndef CDX_MCDI_FUNCTIONS_H
#define CDX_MCDI_FUNCTIONS_H
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
#include "../cdx.h"
/**
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDID_H */
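
The MCDI_BYTE()/MCDI_WORD() accessors above rely on BUILD_BUG_ON_ZERO(), which evaluates to 0 when its condition is false and breaks the build otherwise, letting a macro assert field widths at compile time. A minimal sketch with a made-up structure:

#include <linux/build_bug.h>

struct example_msg {
	unsigned char tag;	/* accessor below insists this is 1 byte */
};

#define EXAMPLE_GET_TAG(msg) \
	((void)BUILD_BUG_ON_ZERO(sizeof((msg)->tag) != 1), (msg)->tag)
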
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7c8dd0abcfdf..d2cfc584e202 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -237,7 +237,8 @@ config APPLICOM
config SONYPI
tristate "Sony Vaio Programmable I/O Control Device support"
- depends on X86_32 && PCI && INPUT
+ depends on X86_32 && PCI && INPUT && HAS_IOPORT
+ depends on ACPI_EC || !ACPI
help
This driver enables access to the Sony Programmable I/O Control
Device which can be found in many (all ?) Sony Vaio laptops.
@@ -403,7 +404,7 @@ config TELCLOCK
configuration of the telecom clock configuration settings. This
device is used for hardware synchronization across the ATCA backplane
fabric. Upon loading, the driver exports a sysfs directory,
- /sys/devices/platform/telco_clock, with a number of files for
+ /sys/devices/faux/telco_clock, with a number of files for
controlling the behavior of this hardware.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,6 +6,7 @@
obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index 751d7cc0da1b..0849d933a2d5 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -14,12 +14,6 @@
#define MAX_BUF_SZ PAGE_SIZE
-static int adi_open(struct inode *inode, struct file *file)
-{
- file->f_mode |= FMODE_UNSIGNED_OFFSET;
- return 0;
-}
-
static int read_mcd_tag(unsigned long addr)
{
long err;
@@ -86,8 +80,8 @@ static ssize_t adi_read(struct file *file, char __user *buf,
bytes_read += ver_buf_sz;
ver_buf_idx = 0;
- ver_buf_sz = min(count - bytes_read,
- (size_t)MAX_BUF_SZ);
+ ver_buf_sz = min_t(size_t, count - bytes_read,
+ MAX_BUF_SZ);
}
}
@@ -137,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
ssize_t ret;
int i;
- if (count <= 0)
+ if (count == 0)
return -EINVAL;
ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
@@ -163,7 +157,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
}
bytes_written += ver_buf_sz;
- ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+ ver_buf_sz = min_t(size_t, count - bytes_written, MAX_BUF_SZ);
} while (bytes_written < count);
(*offp) += bytes_written;
@@ -196,7 +190,6 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
if (offset != file->f_pos) {
file->f_pos = offset;
- file->f_version = 0;
ret = offset;
}
@@ -206,9 +199,9 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
static const struct file_operations adi_fops = {
.owner = THIS_MODULE,
.llseek = adi_llseek,
- .open = adi_open,
.read = adi_read,
.write = adi_write,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static struct miscdevice adi_miscdev = {
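
The adi.c conversion above drops the open() hook that set FMODE_UNSIGNED_OFFSET per file and instead declares the property once, statically, via .fop_flags. A sketch of the resulting file_operations shape:

#include <linux/fs.h>

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
	/* Offsets beyond LLONG_MAX are legal for this device. */
	.fop_flags	= FOP_UNSIGNED_OFFSET,
};
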
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 760d9a931289..2eaab502ec29 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -418,5 +418,6 @@ module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
MODULE_AUTHOR("Dave Jones");
+MODULE_DESCRIPTION("ALi AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index f0d0c044731c..e1763ecb8111 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -217,4 +217,5 @@ module_init(agp_alpha_core_init);
module_exit(agp_alpha_core_cleanup);
MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
+MODULE_DESCRIPTION("Alpha AGP support");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 55397ba765d2..795c8c9ff680 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -549,4 +549,5 @@ static void __exit agp_amdk7_cleanup(void)
module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);
+MODULE_DESCRIPTION("AMD K7 AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index ce8651436609..2505df1f4e69 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,7 +16,7 @@
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820/api.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/gart.h>
#include "agp.h"
@@ -720,11 +720,6 @@ static const struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
-static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
- { PCI_DEVICE_CLASS(0, 0) },
- { }
-};
-
static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume);
static struct pci_driver agp_amd64_pci_driver = {
@@ -739,6 +734,7 @@ static struct pci_driver agp_amd64_pci_driver = {
/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
+ struct pci_dev *pdev = NULL;
int err = 0;
if (agp_off)
@@ -767,9 +763,13 @@ int __init agp_amd64_init(void)
}
/* Look for any AGP bridge */
- agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
- err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0) {
+ for_each_pci_dev(pdev)
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
+ pci_add_dynid(&agp_amd64_pci_driver,
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor,
+ pdev->subsystem_device, 0, 0, 0);
+ if (agp_bridges_found == 0) {
pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
}
@@ -802,4 +802,5 @@ module_exit(agp_amd64_cleanup);
MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
+MODULE_DESCRIPTION("GART driver for the AMD Opteron/Athlon64 on-CPU northbridge");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3c1fce48aabe..f7871afe08cf 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -572,5 +572,6 @@ module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
MODULE_AUTHOR("Dave Jones");
+MODULE_DESCRIPTION("ATi AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index f28d42319269..0d25bbdc7e6a 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -465,4 +465,5 @@ module_init(agp_efficeon_init);
module_exit(agp_efficeon_cleanup);
MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
+MODULE_DESCRIPTION("Transmeta's Efficeon AGPGART driver");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c518b3a9db04..3111e320b2c5 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,7 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <drm/intel-gtt.h>
+#include <drm/intel/intel-gtt.h>
static int intel_fetch_size(void)
{
@@ -920,4 +920,5 @@ module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
MODULE_AUTHOR("Dave Jones, Various @Intel");
+MODULE_DESCRIPTION("Intel AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index bf6716ff863b..bcc26785175d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -25,7 +25,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <drm/intel-gtt.h>
+#include <drm/intel/intel-gtt.h>
#include <asm/set_memory.h>
/*
@@ -53,6 +53,7 @@ struct intel_gtt_driver {
* of the mmio register file, that's done in the generic code. */
void (*cleanup)(void);
void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
/* Flags is a more or less chipset specific opaque value.
* For chipsets that need to support old ums (non-gem) code, this
* needs to be identical to the various supported agp memory types! */
@@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i810_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = val & I810_PTE_LOCAL;
+
+ return val & ~0xfff;
+}
+
static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i830_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return val & ~0xfff;
+}
+
bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local)
+{
+ return intel_private.driver->read_entry(pg, is_present, is_local);
+}
+EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
+
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
@@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i965_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u64 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return ((val & 0xf0) << 28) | (val & ~0xfff);
+}
+
static int i9xx_setup(void)
{
phys_addr_t reg_addr;
@@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = {
.cleanup = i810_cleanup,
.check_flags = i830_check_flags,
.write_entry = i810_write_entry,
+ .read_entry = i810_read_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
@@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i830_chipset_flush,
@@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = {
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1461,4 +1516,5 @@ void intel_gmch_remove(void)
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones, Various @Intel");
+MODULE_DESCRIPTION("Intel GTT (Graphics Translation Table) routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index dbcbc06cc202..4787391bb6b4 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -11,6 +11,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
+#include <asm/msr.h>
#include "agp.h"
/* NVIDIA registers */
@@ -462,6 +463,7 @@ static void __exit agp_nvidia_cleanup(void)
module_init(agp_nvidia_init);
module_exit(agp_nvidia_cleanup);
+MODULE_DESCRIPTION("Nvidia AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("NVIDIA Corporation");
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index edbc4d338117..93a48070b2a1 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -432,4 +432,5 @@ out:
module_init(parisc_agp_init);
MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
+MODULE_DESCRIPTION("HP Quicksilver AGP GART routines");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 484bb101c53b..a0deb97cedb0 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -433,4 +433,5 @@ module_param(agp_sis_force_delay, bool, 0);
MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
module_param(agp_sis_agp_spec, int, 0);
MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
+MODULE_DESCRIPTION("SiS AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index b91da5998dd7..0ab7562d17c9 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -564,5 +564,6 @@ static void __exit agp_serverworks_cleanup(void)
module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);
+MODULE_DESCRIPTION("Serverworks AGPGART routines");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 84411b13c49f..b8d7115b8c9e 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -726,4 +726,5 @@ MODULE_PARM_DESC(aperture,
"\t\tDefault: " DEFAULT_APERTURE_STRING "M");
MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
+MODULE_DESCRIPTION("Apple UniNorth & U3 AGP support");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index bc5140af2dcb..8b19a5d1a09b 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -575,5 +575,6 @@ static void __exit agp_via_cleanup(void)
module_init(agp_via_init);
module_exit(agp_via_cleanup);
+MODULE_DESCRIPTION("VIA AGPGART routines");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index e795390b070f..4aa5d1c76f83 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -141,22 +141,11 @@ static struct apm_queue kapmd_queue;
static DEFINE_MUTEX(state_lock);
-static const char driver_version[] = "1.13"; /* no spaces */
-
-
-
-/*
- * Compatibility cruft until the IPAQ people move over to the new
- * interface.
- */
-static void __apm_get_power_status(struct apm_power_info *info)
-{
-}
/*
* This allows machines to provide their own "apm get power status" function.
*/
-void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+void (*apm_get_power_status)(struct apm_power_info *);
EXPORT_SYMBOL(apm_get_power_status);
@@ -435,6 +424,8 @@ static struct miscdevice apm_device = {
*/
static int proc_apm_show(struct seq_file *m, void *v)
{
+ static const char driver_version[] = "1.13"; /* no spaces */
+
struct apm_power_info info;
char *units;
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 69314532f38c..c138c468f3a4 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -111,7 +111,6 @@ static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = ac_read,
.write = ac_write,
.unlocked_ioctl = ac_ioctl,
@@ -836,7 +835,10 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -ENOTTY;
break;
}
- Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
+ if (cmd != 6)
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
kfree(adgl);
mutex_unlock(&ac_mutex);
return ret;
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 70d31aed9011..837109ef6766 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -342,5 +342,6 @@ static void __exit bsr_exit(void)
module_init(bsr_init);
module_exit(bsr_exit);
+MODULE_DESCRIPTION("IBM POWER Barrier Synchronization Register Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index cf89a9631107..44a1cdbd4bfb 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -353,7 +353,6 @@ static const struct file_operations ds1620_fops = {
.open = ds1620_open,
.read = ds1620_read,
.unlocked_ioctl = ds1620_unlocked_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice ds1620_miscdev = {
@@ -421,4 +420,5 @@ static void __exit ds1620_exit(void)
module_init(ds1620_init);
module_exit(ds1620_exit);
+MODULE_DESCRIPTION("Dallas Semiconductor DS1620 thermometer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index bda27e595da1..1c2c8439797c 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -530,5 +530,6 @@ static void __exit dsp56k_cleanup_driver(void)
}
module_exit(dsp56k_cleanup_driver);
+MODULE_DESCRIPTION("Atari DSP56001 Device Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("dsp56k/bootstrap.bin");
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 6946c1cad9f6..16618079298a 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -107,7 +107,6 @@ static const struct file_operations dtlk_fops =
.unlocked_ioctl = dtlk_ioctl,
.open = dtlk_open,
.release = dtlk_release,
- .llseek = no_llseek,
};
/* local prototypes */
@@ -244,11 +243,11 @@ static __poll_t dtlk_poll(struct file *file, poll_table * wait)
poll_wait(file, &dtlk_process_list, wait);
if (dtlk_has_indexing && dtlk_readable()) {
- del_timer(&dtlk_timer);
+ timer_delete(&dtlk_timer);
mask = EPOLLIN | EPOLLRDNORM;
}
if (dtlk_writeable()) {
- del_timer(&dtlk_timer);
+ timer_delete(&dtlk_timer);
mask |= EPOLLOUT | EPOLLWRNORM;
}
/* there are no exception conditions */
@@ -323,7 +322,7 @@ static int dtlk_release(struct inode *inode, struct file *file)
}
TRACE_RET;
- del_timer_sync(&dtlk_timer);
+ timer_delete_sync(&dtlk_timer);
return 0;
}
@@ -660,4 +659,5 @@ static char dtlk_write_tts(char ch)
return 0;
}
+MODULE_DESCRIPTION("RC Systems DoubleTalk PC speech card driver");
MODULE_LICENSE("GPL");
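
The del_timer()/del_timer_sync() rewrites above follow the timer API rename to timer_delete()/timer_delete_sync(); behaviour is unchanged, and the _sync variant still waits for a concurrently running callback to finish. A minimal sketch:

#include <linux/timer.h>

static struct timer_list example_timer;

static void example_shutdown(void)
{
	/* Deactivates the timer and, unlike timer_delete(), also waits
	 * for a handler running on another CPU to complete. */
	timer_delete_sync(&example_timer);
}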
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 4181bcc1c796..231cbf7b300f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -69,7 +69,8 @@ MODULE_VERSION(VERSION_STR);
static int __init hangcheck_parse_tick(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_tick = par;
return 1;
}
@@ -77,7 +78,8 @@ static int __init hangcheck_parse_tick(char *str)
static int __init hangcheck_parse_margin(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_margin = par;
return 1;
}
@@ -85,7 +87,8 @@ static int __init hangcheck_parse_margin(char *str)
static int __init hangcheck_parse_reboot(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_reboot = par;
return 1;
}
@@ -93,7 +96,8 @@ static int __init hangcheck_parse_reboot(char *str)
static int __init hangcheck_parse_dump_tasks(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_dump_tasks = par;
return 1;
}
@@ -126,23 +130,23 @@ static void hangcheck_fire(struct timer_list *unused)
if (tsc_diff > hangcheck_tsc_margin) {
if (hangcheck_dump_tasks) {
- printk(KERN_CRIT "Hangcheck: Task state:\n");
+ pr_crit("Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
- printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+ pr_crit("Hangcheck: hangcheck is restarting the machine.\n");
emergency_restart();
} else {
- printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+ pr_crit("Hangcheck: hangcheck value past margin!\n");
}
}
#if 0
/*
* Enable to investigate delays in detail
*/
- printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ pr_debug("Hangcheck: called %lld ns since last time (%lld ns overshoot)\n",
tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
@@ -152,7 +156,7 @@ static void hangcheck_fire(struct timer_list *unused)
static int __init hangcheck_init(void)
{
- printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+ pr_debug("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
VERSION_STR, hangcheck_tick, hangcheck_margin);
hangcheck_tsc_margin =
(unsigned long long)hangcheck_margin + hangcheck_tick;
@@ -167,8 +171,8 @@ static int __init hangcheck_init(void)
static void __exit hangcheck_exit(void)
{
- del_timer_sync(&hangcheck_ticktock);
- printk("Hangcheck: Stopped hangcheck timer.\n");
+ timer_delete_sync(&hangcheck_ticktock);
+ pr_debug("Hangcheck: Stopped hangcheck timer.\n");
}
module_init(hangcheck_init);
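
Besides the timer rename, the hunks above move hangcheck's logging to the pr_*() helpers: unconditional warnings become pr_crit(), while start/stop chatter is demoted to pr_debug(), which compiles out unless DEBUG or dynamic debug is enabled. A common companion pattern keeps the prefix in one place via pr_fmt, sketched here with a hypothetical prefix:

#define pr_fmt(fmt) "Hangcheck: " fmt	/* must precede the printk.h include */

#include <linux/printk.h>

static void example_report(long long overshoot_ns)
{
	pr_crit("value past margin!\n");		/* always logged */
	pr_debug("overshoot %lld ns\n", overshoot_ns);	/* DEBUG/dyndbg only */
}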
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d51fc8321d41..4f5ccd3a1f56 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -162,6 +162,7 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
+ const unsigned int nr_irqs = irq_get_nr_irqs();
unsigned long v;
int irq, gsi;
struct hpet_timer __iomem *timer;
@@ -269,8 +270,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
if (!devp->hd_ireqfreq)
return -EIO;
- if (count < sizeof(unsigned long))
- return -EINVAL;
+ if (in_compat_syscall()) {
+ if (count < sizeof(compat_ulong_t))
+ return -EINVAL;
+ } else {
+ if (count < sizeof(unsigned long))
+ return -EINVAL;
+ }
add_wait_queue(&devp->hd_waitqueue, &wait);
@@ -294,9 +300,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
schedule();
}
- retval = put_user(data, (unsigned long __user *)buf);
- if (!retval)
- retval = sizeof(unsigned long);
+ if (in_compat_syscall()) {
+ retval = put_user(data, (compat_ulong_t __user *)buf);
+ if (!retval)
+ retval = sizeof(compat_ulong_t);
+ } else {
+ retval = put_user(data, (unsigned long __user *)buf);
+ if (!retval)
+ retval = sizeof(unsigned long);
+ }
+
out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&devp->hd_waitqueue, &wait);
@@ -651,12 +664,24 @@ struct compat_hpet_info {
unsigned short hi_timer;
};
+/*
+ * 32-bit types would lead to different command codes which should be
+ * translated into 64-bit ones before being passed to hpet_ioctl_common()
+ */
+#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info)
+#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, compat_ulong_t)
+
static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hpet_info info;
int err;
+ if (cmd == COMPAT_HPET_INFO)
+ cmd = HPET_INFO;
+
+ if (cmd == COMPAT_HPET_IRQFREQ)
+ cmd = HPET_IRQFREQ;
+
mutex_lock(&hpet_mutex);
err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
mutex_unlock(&hpet_mutex);
@@ -676,7 +701,6 @@ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations hpet_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
.unlocked_ioctl = hpet_ioctl,
@@ -700,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp)
return 0;
}
-static struct ctl_table hpet_table[] = {
+static const struct ctl_table hpet_table[] = {
{
.procname = "max-user-freq",
.data = &hpet_max_freq,
@@ -784,7 +808,7 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpets *hpetp;
struct hpet __iomem *hpet;
static struct hpets *last;
- unsigned long period;
+ u32 period;
unsigned long long temp;
u32 remainder;
@@ -841,11 +865,11 @@ int hpet_alloc(struct hpet_data *hdp)
do_div(temp, period);
hpetp->hp_tick_freq = temp; /* ticks per second */
- printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+ printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
- hpetp->hp_ntimer > 1 ? "s" : "");
+ str_plural(hpetp->hp_ntimer));
for (i = 0; i < hpetp->hp_ntimer; i++)
- printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
temp = hpetp->hp_tick_freq;
@@ -999,8 +1023,7 @@ static int __init hpet_init(void)
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
- if (sysctl_header)
- unregister_sysctl_table(sysctl_header);
+ unregister_sysctl_table(sysctl_header);
misc_deregister(&hpet_misc);
return result;
}
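
The compat changes above exist because _IOR()/_IOW() encode the argument size into the command number: a 32-bit process computing HPET_IRQFREQ from a 4-byte unsigned long arrives at a different value than the 64-bit kernel's 8-byte version, so the compat handler translates the 32-bit numbers onto the native ones before the shared ioctl path. A sketch of the size dependence, with placeholder names:

#include <linux/ioctl.h>
#include <linux/types.h>

#define EXAMPLE_IRQFREQ32 _IOW('h', 0x6, __u32)	/* what 32-bit userspace computes */
#define EXAMPLE_IRQFREQ64 _IOW('h', 0x6, __u64)	/* what a 64-bit kernel expects */
/* The two constants differ only in the size field (bits 29:16). */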
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 442c40efb200..492a2a61a65b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -50,7 +50,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
- depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+ depends on (X86 || COMPILE_TEST)
depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
@@ -62,9 +62,22 @@ config HW_RANDOM_AMD
If unsure, say Y.
+config HW_RANDOM_AIROHA
+ tristate "Airoha True HW Random Number Generator support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found on Airoha SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called airoha-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on (ARCH_MICROCHIP || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
+config HW_RANDOM_BCM74110
+ tristate "Broadcom BCM74110 Random Number Generator support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM74110 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm74110-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
@@ -286,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK || COMPILE_TEST
+ depends on ARM_AMBA
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -508,10 +535,10 @@ config HW_RANDOM_NPCM
If unsure, say Y.
config HW_RANDOM_KEYSTONE
+ tristate "TI Keystone NETCP SA Hardware random number generator"
depends on ARCH_KEYSTONE || COMPILE_TEST
depends on HAS_IOMEM && OF
default HW_RANDOM
- tristate "TI Keystone NETCP SA Hardware random number generator"
help
This option enables Keystone's hardware random generator.
@@ -553,15 +580,15 @@ config HW_RANDOM_ARM_SMCCC_TRNG
module will be called arm_smccc_trng.
config HW_RANDOM_CN10K
- tristate "Marvell CN10K Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
- default HW_RANDOM
- help
- This driver provides support for the True Random Number
- generator available in Marvell CN10K SoCs.
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
+ default HW_RANDOM if ARCH_THUNDER
+ help
+ This driver provides support for the True Random Number
+ generator available in Marvell CN10K SoCs.
- To compile this driver as a module, choose M here.
- The module will be called cn10k_rng. If unsure, say Y.
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng. If unsure, say Y.
config HW_RANDOM_JH7110
tristate "StarFive JH7110 Random Number Generator support"
@@ -573,6 +600,21 @@ config HW_RANDOM_JH7110
To compile this driver as a module, choose M here.
The module will be called jh7110-trng.
+config HW_RANDOM_ROCKCHIP
+ tristate "Rockchip True Random Number Generator"
+ depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST)
+ depends on HAS_IOMEM
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found on some Rockchip SoCs like RK3566, RK3568
+ or RK3588.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rockchip-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 32549a1186dc..b9132b3f5d21 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -8,6 +8,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
@@ -48,4 +50,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
+obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o
obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
diff --git a/drivers/char/hw_random/airoha-trng.c b/drivers/char/hw_random/airoha-trng.c
new file mode 100644
index 000000000000..1dbfa9505c21
--- /dev/null
+++ b/drivers/char/hw_random/airoha-trng.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Christian Marangi */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#define TRNG_IP_RDY 0x800
+#define CNT_TRANS GENMASK(15, 8)
+#define SAMPLE_RDY BIT(0)
+#define TRNG_NS_SEK_AND_DAT_EN 0x804
+#define RNG_EN BIT(31) /* referenced as ring_en */
+#define RAW_DATA_EN BIT(16)
+#define TRNG_HEALTH_TEST_SW_RST 0x808
+#define SW_RST BIT(0) /* Active High */
+#define TRNG_INTR_EN 0x818
+#define INTR_MASK BIT(16)
+#define CONTINUOUS_HEALTH_INITR_EN BIT(2)
+#define SW_STARTUP_INITR_EN BIT(1)
+#define RST_STARTUP_INITR_EN BIT(0)
+/* Notice that Health Tests are done only out of Reset and with RNG_EN */
+#define TRNG_HEALTH_TEST_STATUS 0x824
+#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
+#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
+#define SW_STARTUP_TEST_DONE BIT(21)
+#define SW_STARTUP_AP_TEST_FAIL BIT(20)
+#define SW_STARTUP_RC_TEST_FAIL BIT(19)
+#define RST_STARTUP_TEST_DONE BIT(18)
+#define RST_STARTUP_AP_TEST_FAIL BIT(17)
+#define RST_STARTUP_RC_TEST_FAIL BIT(16)
+#define RAW_DATA_VALID BIT(7)
+
+#define TRNG_RAW_DATA_OUT 0x828
+
+#define TRNG_CNT_TRANS_VALID 0x80
+#define BUSY_LOOP_SLEEP 10
+#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000)
+
+struct airoha_trng {
+ void __iomem *base;
+ struct hwrng rng;
+ struct device *dev;
+
+ struct completion rng_op_done;
+};
+
+static int airoha_trng_irq_mask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_irq_unmask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val &= ~INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_init(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ int ret;
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Set out of SW Reset */
+ airoha_trng_irq_unmask(trng);
+ writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
+ if (ret <= 0) {
+ dev_err(trng->dev, "Timeout waiting for Health Check\n");
+ airoha_trng_irq_mask(trng);
+ return -ENODEV;
+ }
+
+ /* Check if Health Test Failed */
+ val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
+ if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
+ dev_err(trng->dev, "Health Check fail: %s test fail\n",
+ val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
+ return -ENODEV;
+ }
+
+ /* Check if IP is ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ val & SAMPLE_RDY, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ /* CNT_TRANS must be 0x80 for IP to be considered ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
+ 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void airoha_trng_cleanup(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val &= ~RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+}
+
+static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
+ status & RAW_DATA_VALID, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
+ return ret;
+ }
+
+ *data = readl(trng->base + TRNG_RAW_DATA_OUT);
+
+ return 4;
+}
+
+static irqreturn_t airoha_trng_irq(int irq, void *priv)
+{
+ struct airoha_trng *trng = (struct airoha_trng *)priv;
+
+ airoha_trng_irq_mask(trng);
+ /* Just complete the task, we will read the value later */
+ complete(&trng->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_trng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_trng *trng;
+ int irq, ret;
+ u32 val;
+
+ trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ airoha_trng_irq_mask(trng);
+ ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
+ pdev->name, (void *)trng);
+ if (ret) {
+ dev_err(dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
+ init_completion(&trng->rng_op_done);
+
+ /* Enable interrupt for SW reset Health Check */
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= RST_STARTUP_INITR_EN;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ /* Set output to raw data */
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RAW_DATA_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ trng->dev = dev;
+ trng->rng.name = pdev->name;
+ trng->rng.init = airoha_trng_init;
+ trng->rng.cleanup = airoha_trng_cleanup;
+ trng->rng.read = airoha_trng_read;
+
+ ret = devm_hwrng_register(dev, &trng->rng);
+ if (ret) {
+ dev_err(dev, "failed to register rng device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id airoha_trng_of_match[] = {
+ { .compatible = "airoha,en7581-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
+
+static struct platform_driver airoha_trng_driver = {
+ .driver = {
+ .name = "airoha-trng",
+ .of_match_table = airoha_trng_of_match,
+ },
+ .probe = airoha_trng_probe,
+};
+
+module_platform_driver(airoha_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Airoha True Random Number Generator driver");
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 86162a13681e..9a24d19236dc 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void)
found:
err = pci_read_config_dword(pdev, 0x58, &pmbase);
- if (err)
+ if (err) {
+ err = pcibios_err_to_errno(err);
goto put_dev;
+ }
pmbase &= 0x0000FF00;
if (pmbase == 0) {
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index 7e954341b09f..dcb8e7f37f25 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -118,4 +118,5 @@ module_platform_driver(smccc_trng_driver);
MODULE_ALIAS("platform:smccc_trng");
MODULE_AUTHOR("Andre Przywara");
+MODULE_DESCRIPTION("Arm SMCCC TRNG firmware interface support");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index e9157255f851..6ed24be3481d 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -37,6 +37,7 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
bool has_half_rate;
};
@@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
int ret;
- ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ ret = pm_runtime_get_sync(trng->dev);
if (ret < 0) {
- pm_runtime_put_sync((struct device *)trng->rng.priv);
+ pm_runtime_put_sync(trng->dev);
return ret;
}
@@ -79,8 +80,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
@@ -134,9 +134,9 @@ static int atmel_trng_probe(struct platform_device *pdev)
return -ENODEV;
trng->has_half_rate = data->has_half_rate;
+ trng->dev = &pdev->dev;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
- trng->rng.priv = (unsigned long)&pdev->dev;
platform_set_drvdata(pdev, trng);
#ifndef CONFIG_PM
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove_new = atmel_trng_remove,
+ .remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.pm = pm_ptr(&atmel_trng_pm_ops),
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index b03e80300627..6d6ac409efcf 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng)
return ret;
ret = reset_control_reset(priv->reset);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
return ret;
+ }
if (priv->mask_interrupts) {
/* mask the interrupt */
@@ -136,12 +138,11 @@ static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm6368-rng"},
{},
};
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static int bcm2835_rng_probe(struct platform_device *pdev)
{
- const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
- const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
int err;
@@ -169,12 +170,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.cleanup = bcm2835_rng_cleanup;
if (dev_of_node(dev)) {
- rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
- if (!rng_id)
- return -EINVAL;
+ const struct bcm2835_rng_of_data *of_data;
/* Check for rng init function, execute it */
- of_data = rng_id->data;
+ of_data = of_device_get_match_data(dev);
if (of_data)
priv->mask_interrupts = of_data->mask_interrupts;
}
@@ -189,8 +188,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return err;
}
-MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
-
static const struct platform_device_id bcm2835_rng_devtype[] = {
{ .name = "bcm2835-rng" },
{ .name = "bcm63xx-rng" },
diff --git a/drivers/char/hw_random/bcm74110-rng.c b/drivers/char/hw_random/bcm74110-rng.c
new file mode 100644
index 000000000000..5c64148e91f1
--- /dev/null
+++ b/drivers/char/hw_random/bcm74110-rng.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Broadcom
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+#define HOST_REV_ID 0x00
+#define HOST_FIFO_DEPTH 0x04
+#define HOST_FIFO_COUNT 0x08
+#define HOST_FIFO_THRESHOLD 0x0c
+#define HOST_FIFO_DATA 0x10
+
+#define HOST_FIFO_COUNT_MASK 0xffff
+
+/* Delay range in microseconds */
+#define FIFO_DELAY_MIN_US 3
+#define FIFO_DELAY_MAX_US 7
+#define FIFO_DELAY_MAX_COUNT 10
+
+struct bcm74110_priv {
+ void __iomem *base;
+};
+
+static inline int bcm74110_rng_fifo_count(void __iomem *mem)
+{
+ return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK;
+}
+
+static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv;
+ void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT;
+ void __iomem *fd_addr = priv->base + HOST_FIFO_DATA;
+ unsigned int underrun_count = 0;
+ u32 max_words = max / sizeof(u32);
+ u32 num_words;
+ unsigned int i;
+
+ /*
+ * We need to check how many words are available in the RNG FIFO. If
+ * there aren't any, we need to wait for some to become available.
+ */
+ while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) {
+ if (!wait)
+ return 0;
+ /*
+ * As a precaution, limit how long we wait. If the FIFO doesn't
+ * refill within the allotted time, return 0 (=no data) to the
+ * caller.
+ */
+ if (likely(underrun_count < FIFO_DELAY_MAX_COUNT))
+ usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US);
+ else
+ return 0;
+ underrun_count++;
+ }
+ if (num_words > max_words)
+ num_words = max_words;
+
+ /* Bail early if we run out of random numbers unexpectedly */
+ for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++)
+ ((u32 *)buf)[i] = readl_relaxed(fd_addr);
+
+ return i * sizeof(u32);
+}
+
+static struct hwrng bcm74110_hwrng = {
+ .read = bcm74110_rng_read,
+};
+
+static int bcm74110_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_priv *priv;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bcm74110_hwrng.name = pdev->name;
+ bcm74110_hwrng.priv = (unsigned long)priv;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ rc = devm_hwrng_register(dev, &bcm74110_hwrng);
+ if (rc)
+ dev_err(dev, "hwrng registration failed (%d)\n", rc);
+ else
+ dev_info(dev, "hwrng registered\n");
+
+ return rc;
+}
+
+static const struct of_device_id bcm74110_rng_match[] = {
+ { .compatible = "brcm,bcm74110-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm74110_rng_match);
+
+static struct platform_driver bcm74110_rng_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bcm74110_rng_match,
+ },
+ .probe = bcm74110_rng_probe,
+};
+module_platform_driver(bcm74110_rng_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index c99c54cd99c6..c1b8918b2292 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -266,4 +266,5 @@ static struct pci_driver cavium_rng_vf_driver = {
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator VF support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index b96579222408..d9d7b6038c06 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -88,4 +88,5 @@ static struct pci_driver cavium_rng_pf_driver = {
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index c0d2f824769f..a5be9258037f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -98,7 +98,6 @@ static void cc_trng_pm_put_suspend(struct device *dev)
{
int rc = 0;
- pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
if (rc)
dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
@@ -622,6 +621,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
/* wait for Cryptocell reset completion */
if (!cctrng_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
+ clk_disable_unprepare(drvdata->clk);
return -EBUSY;
}
@@ -652,7 +652,7 @@ static struct platform_driver cctrng_driver = {
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
- .remove_new = cctrng_remove,
+ .remove = cctrng_remove,
};
module_platform_driver(cctrng_driver);
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 31935316a160..3b4e78182e14 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -188,7 +188,7 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rng->reg_base = pcim_iomap(pdev, 0, 0);
if (!rng->reg_base)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index f5c71a617a99..96d7fe41b373 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -64,19 +64,6 @@ static size_t rng_buffer_size(void)
return RNG_BUFFER_SIZE;
}
-static void add_early_randomness(struct hwrng *rng)
-{
- int bytes_read;
-
- mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
- mutex_unlock(&reading_mutex);
- if (bytes_read > 0) {
- size_t entropy = bytes_read * 8 * rng->quality / 1024;
- add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
- }
-}
-
static inline void cleanup_rng(struct kref *kref)
{
struct hwrng *rng = container_of(kref, struct hwrng, ref);
@@ -174,7 +161,6 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
current_quality = rng->quality; /* obsolete */
return 0;
@@ -195,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int present;
BUG_ON(!mutex_is_locked(&reading_mutex));
- if (rng->read)
- return rng->read(rng, (void *)buffer, size, wait);
+ if (rng->read) {
+ int err;
+
+ err = rng->read(rng, buffer, size, wait);
+ if (WARN_ON_ONCE(err > 0 && err > size))
+ err = size;
+
+ return err;
+ }
if (rng->data_present)
present = rng->data_present(rng, wait);
@@ -340,15 +333,17 @@ static ssize_t rng_current_store(struct device *dev,
const char *buf, size_t len)
{
int err;
- struct hwrng *rng, *old_rng, *new_rng;
+ struct hwrng *rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
- old_rng = current_rng;
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
+ } else if (sysfs_streq(buf, "none")) {
+ cur_rng_set_by_user = 1;
+ drop_current_rng();
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
@@ -362,11 +357,8 @@ static ssize_t rng_current_store(struct device *dev,
new_rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
- if (new_rng) {
- if (new_rng != old_rng)
- add_early_randomness(new_rng);
+ if (new_rng)
put_rng(new_rng);
- }
return err ? : len;
}
@@ -403,7 +395,7 @@ static ssize_t rng_available_show(struct device *dev,
strlcat(buf, rng->name, PAGE_SIZE);
strlcat(buf, " ", PAGE_SIZE);
}
- strlcat(buf, "\n", PAGE_SIZE);
+ strlcat(buf, "none\n", PAGE_SIZE);
mutex_unlock(&rng_mutex);
return strlen(buf);
@@ -487,16 +479,6 @@ static struct attribute *rng_dev_attrs[] = {
ATTRIBUTE_GROUPS(rng_dev);
-static void __exit unregister_miscdev(void)
-{
- misc_deregister(&rng_miscdev);
-}
-
-static int __init register_miscdev(void)
-{
- return misc_register(&rng_miscdev);
-}
-
static int hwrng_fillfn(void *unused)
{
size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
@@ -544,7 +526,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -563,8 +544,11 @@ int hwrng_register(struct hwrng *rng)
complete(&rng->cleanup_done);
init_completion(&rng->dying);
- if (!current_rng ||
- (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
+ /* Adjust quality field to always have a proper value */
+ rng->quality = min3(default_quality, 1024, rng->quality ?: 1024);
+
+ if (!cur_rng_set_by_user &&
+ (!current_rng || rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
@@ -573,25 +557,8 @@ int hwrng_register(struct hwrng *rng)
err = set_current_rng(rng);
if (err)
goto out_unlock;
- /* to use current_rng in add_early_randomness() we need
- * to take a ref
- */
- is_new_current = true;
- kref_get(&rng->ref);
}
mutex_unlock(&rng_mutex);
- if (is_new_current || !rng->init) {
- /*
- * Use a new device's input to add some randomness to
- * the system. If this rng device isn't going to be
- * used right away, its init function hasn't been
- * called yet by set_current_rng(); so only use the
- * randomness from devices that don't need an init callback
- */
- add_early_randomness(rng);
- }
- if (is_new_current)
- put_rng(rng);
return 0;
out_unlock:
mutex_unlock(&rng_mutex);
@@ -602,12 +569,11 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
- struct hwrng *old_rng, *new_rng;
+ struct hwrng *new_rng;
int err;
mutex_lock(&rng_mutex);
- old_rng = current_rng;
list_del(&rng->list);
complete_all(&rng->dying);
if (current_rng == rng) {
@@ -626,11 +592,8 @@ void hwrng_unregister(struct hwrng *rng)
} else
mutex_unlock(&rng_mutex);
- if (new_rng) {
- if (old_rng != new_rng)
- add_early_randomness(new_rng);
+ if (new_rng)
put_rng(new_rng);
- }
wait_for_completion(&rng->cleanup_done);
}
@@ -707,7 +670,7 @@ static int __init hwrng_modinit(void)
return -ENOMEM;
}
- ret = register_miscdev();
+ ret = misc_register(&rng_miscdev);
if (ret) {
kfree(rng_fillbuf);
kfree(rng_buffer);
@@ -724,7 +687,7 @@ static void __exit hwrng_modexit(void)
kfree(rng_fillbuf);
mutex_unlock(&rng_mutex);
- unregister_miscdev();
+ misc_deregister(&rng_miscdev);
}
fs_initcall(hwrng_modinit); /* depends on misc_register() */
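
The quality handling consolidated above keeps the core's accounting convention, visible in the removed add_early_randomness(): quality is expressed in 1/1024ths of a bit of entropy per output bit, so a device advertising quality 700 is credited 700/1024 bit per bit. The arithmetic in isolation:

/* Entropy credited for a buffer, mirroring the core's formula. */
static size_t example_entropy_bits(size_t bytes_read, unsigned short quality)
{
	return bytes_read * 8 * quality / 1024;
}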
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 0ed5d22fe667..02e207c09e81 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -10,6 +10,7 @@
* Krzysztof Kozłowski <krzk@kernel.org>
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
@@ -22,46 +23,69 @@
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
-#define EXYNOS_TRNG_CLKDIV (0x0)
-
-#define EXYNOS_TRNG_CTRL (0x20)
-#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
-
-#define EXYNOS_TRNG_POST_CTRL (0x30)
-#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
-#define EXYNOS_TRNG_ONLINE_STAT (0x44)
-#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
-#define EXYNOS_TRNG_FIFO_CTRL (0x50)
-#define EXYNOS_TRNG_FIFO_0 (0x80)
-#define EXYNOS_TRNG_FIFO_1 (0x84)
-#define EXYNOS_TRNG_FIFO_2 (0x88)
-#define EXYNOS_TRNG_FIFO_3 (0x8c)
-#define EXYNOS_TRNG_FIFO_4 (0x90)
-#define EXYNOS_TRNG_FIFO_5 (0x94)
-#define EXYNOS_TRNG_FIFO_6 (0x98)
-#define EXYNOS_TRNG_FIFO_7 (0x9c)
-#define EXYNOS_TRNG_FIFO_LEN (8)
-#define EXYNOS_TRNG_CLOCK_RATE (500000)
-
+#include <linux/property.h>
+
+#define EXYNOS_TRNG_CLKDIV 0x0
+
+#define EXYNOS_TRNG_CTRL 0x20
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL 0x30
+#define EXYNOS_TRNG_ONLINE_CTRL 0x40
+#define EXYNOS_TRNG_ONLINE_STAT 0x44
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 0x48
+#define EXYNOS_TRNG_FIFO_CTRL 0x50
+#define EXYNOS_TRNG_FIFO_0 0x80
+#define EXYNOS_TRNG_FIFO_1 0x84
+#define EXYNOS_TRNG_FIFO_2 0x88
+#define EXYNOS_TRNG_FIFO_3 0x8c
+#define EXYNOS_TRNG_FIFO_4 0x90
+#define EXYNOS_TRNG_FIFO_5 0x94
+#define EXYNOS_TRNG_FIFO_6 0x98
+#define EXYNOS_TRNG_FIFO_7 0x9c
+#define EXYNOS_TRNG_FIFO_LEN 8
+#define EXYNOS_TRNG_CLOCK_RATE 500000
+
+/* Driver feature flags */
+#define EXYNOS_SMC BIT(0)
+
+#define EXYNOS_SMC_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_SIP, \
+ func_num)
+
+/* SMC command for DTRNG access */
+#define SMC_CMD_RANDOM EXYNOS_SMC_CALL_VAL(0x1012)
+
+/* SMC_CMD_RANDOM: arguments */
+#define HWRNG_INIT 0x0
+#define HWRNG_EXIT 0x1
+#define HWRNG_GET_DATA 0x2
+#define HWRNG_RESUME 0x3
+
+/* SMC_CMD_RANDOM: return values */
+#define HWRNG_RET_OK 0x0
+#define HWRNG_RET_RETRY_ERROR 0x2
+
+#define HWRNG_MAX_TRIES 100
struct exynos_trng_dev {
- struct device *dev;
- void __iomem *mem;
- struct clk *clk;
- struct hwrng rng;
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk; /* operating clock */
+ struct clk *pclk; /* bus clock */
+ struct hwrng rng;
+ unsigned long flags;
};
-static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
- bool wait)
+static int exynos_trng_do_read_reg(struct hwrng *rng, void *data, size_t max,
+ bool wait)
{
- struct exynos_trng_dev *trng;
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
int val;
max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
-
- trng = (struct exynos_trng_dev *)rng->priv;
-
writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
val == 0, 200, 1000000);
@@ -73,7 +97,40 @@ static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
return max;
}
-static int exynos_trng_init(struct hwrng *rng)
+static int exynos_trng_do_read_smc(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct arm_smccc_res res;
+ unsigned int copied = 0;
+ u32 *buf = data;
+ int tries = 0;
+
+ while (copied < max) {
+ arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_GET_DATA, 0, 0, 0, 0, 0, 0,
+ &res);
+ switch (res.a0) {
+ case HWRNG_RET_OK:
+ *buf++ = res.a2;
+ *buf++ = res.a3;
+ copied += 8;
+ tries = 0;
+ break;
+ case HWRNG_RET_RETRY_ERROR:
+ if (!wait)
+ return copied;
+ if (++tries >= HWRNG_MAX_TRIES)
+ return copied;
+ cond_resched();
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ return copied;
+}
+
+static int exynos_trng_init_reg(struct hwrng *rng)
{
struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
unsigned long sss_rate;
@@ -87,7 +144,7 @@ static int exynos_trng_init(struct hwrng *rng)
*/
val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
if (val > 0x7fff) {
- dev_err(trng->dev, "clock divider too large: %d", val);
+ dev_err(trng->dev, "clock divider too large: %d\n", val);
return -ERANGE;
}
val = val << 1;
@@ -106,6 +163,24 @@ static int exynos_trng_init(struct hwrng *rng)
return 0;
}
+static int exynos_trng_init_smc(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ struct arm_smccc_res res;
+ int ret = 0;
+
+ arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != HWRNG_RET_OK) {
+ dev_err(trng->dev, "SMC command for TRNG init failed (%d)\n",
+ (int)res.a0);
+ ret = -EIO;
+ }
+ if ((int)res.a0 == -1)
+ dev_info(trng->dev, "Make sure LDFW is loaded by your BL\n");
+
+ return ret;
+}
+
static int exynos_trng_probe(struct platform_device *pdev)
{
struct exynos_trng_dev *trng;
@@ -115,21 +190,29 @@ static int exynos_trng_probe(struct platform_device *pdev)
if (!trng)
return ret;
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ trng->flags = (unsigned long)device_get_match_data(&pdev->dev);
+
trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
GFP_KERNEL);
if (!trng->rng.name)
return ret;
- trng->rng.init = exynos_trng_init;
- trng->rng.read = exynos_trng_do_read;
- trng->rng.priv = (unsigned long) trng;
+ trng->rng.priv = (unsigned long)trng;
- platform_set_drvdata(pdev, trng);
- trng->dev = &pdev->dev;
+ if (trng->flags & EXYNOS_SMC) {
+ trng->rng.init = exynos_trng_init_smc;
+ trng->rng.read = exynos_trng_do_read_smc;
+ } else {
+ trng->rng.init = exynos_trng_init_reg;
+ trng->rng.read = exynos_trng_do_read_reg;
- trng->mem = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(trng->mem))
- return PTR_ERR(trng->mem);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+ }
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
@@ -138,32 +221,30 @@ static int exynos_trng_probe(struct platform_device *pdev)
goto err_pm_get;
}
- trng->clk = devm_clk_get(&pdev->dev, "secss");
+ trng->clk = devm_clk_get_enabled(&pdev->dev, "secss");
if (IS_ERR(trng->clk)) {
- ret = PTR_ERR(trng->clk);
- dev_err(&pdev->dev, "Could not get clock.\n");
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->clk),
+ "Could not get clock\n");
goto err_clock;
}
- ret = clk_prepare_enable(trng->clk);
- if (ret) {
- dev_err(&pdev->dev, "Could not enable the clk.\n");
+ trng->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(trng->pclk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->pclk),
+ "Could not get pclk\n");
goto err_clock;
}
ret = devm_hwrng_register(&pdev->dev, &trng->rng);
if (ret) {
dev_err(&pdev->dev, "Could not register hwrng device.\n");
- goto err_register;
+ goto err_clock;
}
dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
return 0;
-err_register:
- clk_disable_unprepare(trng->clk);
-
err_clock:
pm_runtime_put_noidle(&pdev->dev);
@@ -175,9 +256,14 @@ err_pm_get:
static void exynos_trng_remove(struct platform_device *pdev)
{
- struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
- clk_disable_unprepare(trng->clk);
+ if (trng->flags & EXYNOS_SMC) {
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0,
+ &res);
+ }
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -185,6 +271,16 @@ static void exynos_trng_remove(struct platform_device *pdev)
static int exynos_trng_suspend(struct device *dev)
{
+ struct exynos_trng_dev *trng = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
+
+ if (trng->flags & EXYNOS_SMC) {
+ arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0,
+ &res);
+ if (res.a0 != HWRNG_RET_OK)
+ return -EIO;
+ }
+
pm_runtime_put_sync(dev);
return 0;
@@ -192,6 +288,7 @@ static int exynos_trng_suspend(struct device *dev)
static int exynos_trng_resume(struct device *dev)
{
+ struct exynos_trng_dev *trng = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -200,15 +297,32 @@ static int exynos_trng_resume(struct device *dev)
return ret;
}
+ if (trng->flags & EXYNOS_SMC) {
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_RESUME, 0, 0, 0, 0, 0, 0,
+ &res);
+ if (res.a0 != HWRNG_RET_OK)
+ return -EIO;
+
+ arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0,
+ &res);
+ if (res.a0 != HWRNG_RET_OK)
+ return -EIO;
+ }
+
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
- exynos_trng_resume);
+ exynos_trng_resume);
static const struct of_device_id exynos_trng_dt_match[] = {
{
.compatible = "samsung,exynos5250-trng",
+ }, {
+ .compatible = "samsung,exynos850-trng",
+ .data = (void *)EXYNOS_SMC,
},
{ },
};
@@ -221,10 +335,11 @@ static struct platform_driver exynos_trng_driver = {
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
- .remove_new = exynos_trng_remove,
+ .remove = exynos_trng_remove,
};
module_platform_driver(exynos_trng_driver);
+
MODULE_AUTHOR("Łukasz Stelmach");
MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c
index f652e1135e4b..1b91e88cc4c0 100644
--- a/drivers/char/hw_random/histb-rng.c
+++ b/drivers/char/hw_random/histb-rng.c
@@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf)
struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
- return sprintf(buf, "%d\n", histb_rng_get_depth(base));
+ return sprintf(buf, "%u\n", histb_rng_get_depth(base));
}
static ssize_t
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 118a72acb99b..241664a9b5d9 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -13,6 +13,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/hw_random.h>
#include <linux/completion.h>
@@ -53,6 +55,7 @@
#define RNGC_SELFTEST_TIMEOUT 2500 /* us */
#define RNGC_SEED_TIMEOUT 200 /* ms */
+#define RNGC_PM_TIMEOUT 500 /* ms */
static bool self_test = true;
module_param(self_test, bool, 0);
@@ -123,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
unsigned int status;
- int retval = 0;
+ int err, retval = 0;
+
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (err)
+ return err;
while (max >= sizeof(u32)) {
status = readl(rngc->base + RNGC_STATUS);
@@ -141,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
}
+ pm_runtime_mark_last_busy(rngc->dev);
+ pm_runtime_put(rngc->dev);
return retval ? retval : -EIO;
}
@@ -169,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
u32 cmd, ctrl;
- int ret;
+ int ret, err;
+
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (err)
+ return err;
/* clear error */
cmd = readl(rngc->base + RNGC_COMMAND);
@@ -186,15 +199,15 @@ static int imx_rngc_init(struct hwrng *rng)
ret = wait_for_completion_timeout(&rngc->rng_op_done,
msecs_to_jiffies(RNGC_SEED_TIMEOUT));
if (!ret) {
- ret = -ETIMEDOUT;
- goto err;
+ err = -ETIMEDOUT;
+ goto out;
}
} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
if (rngc->err_reg) {
- ret = -EIO;
- goto err;
+ err = -EIO;
+ goto out;
}
/*
@@ -205,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng)
ctrl |= RNGC_CTRL_AUTO_SEED;
writel(ctrl, rngc->base + RNGC_CONTROL);
+out:
	/*
	 * If initialisation was successful, we keep the interrupt
	 * unmasked until imx_rngc_cleanup is called; we mask the
	 * interrupt ourselves if we return an error.
	 */
- return 0;
+ if (err)
+ imx_rngc_irq_mask_clear(rngc);
-err:
- imx_rngc_irq_mask_clear(rngc);
- return ret;
+ pm_runtime_put(rngc->dev);
+ return err;
}
static void imx_rngc_cleanup(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ int err;
- imx_rngc_irq_mask_clear(rngc);
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (!err) {
+ imx_rngc_irq_mask_clear(rngc);
+ pm_runtime_put(rngc->dev);
+ }
}
static int __init imx_rngc_probe(struct platform_device *pdev)
@@ -240,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rngc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
@@ -248,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ clk_prepare_enable(rngc->clk);
+
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = FIELD_GET(RNG_TYPE, ver_id);
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
+ clk_disable_unprepare(rngc->clk);
return -ENODEV;
+ }
init_completion(&rngc->rng_op_done);
@@ -272,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rngc->clk);
return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
+ }
if (self_test) {
ret = imx_rngc_self_test(rngc);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rngc->clk);
return dev_err_probe(&pdev->dev, ret, "self test failed\n");
+ }
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
+
ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret)
return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
@@ -310,7 +342,10 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL)
+};
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb" },
@@ -321,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = pm_sleep_ptr(&imx_rngc_pm_ops),
+ .pm = pm_ptr(&imx_rngc_pm_ops),
.of_match_table = imx_rngc_dt_ids,
},
};
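
The imx-rngc hunks above convert the driver to runtime PM with autosuspend: the
probe path arms autosuspend, and every hardware access is bracketed by a
get/put pair. A self-contained sketch of both halves, with a hypothetical
hw_read() standing in for the register access:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Probe side: mirrors the hunk above (500 ms autosuspend window). */
static void demo_setup_rpm(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 500);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);	/* hardware is already powered */
	devm_pm_runtime_enable(dev);	/* undone automatically on unbind */
}

static void hw_read(void) { }		/* hypothetical register access */

/* I/O side: every hardware touch is bracketed by get/put. */
static int demo_read_rpm(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret)
		return ret;
	hw_read();
	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put(dev);		/* may power down after the delay */
	return 0;
}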
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index 2f9b6483c4a1..bbfd662d25a6 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -132,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
static struct platform_driver ingenic_rng_driver = {
.probe = ingenic_rng_probe,
- .remove_new = ingenic_rng_remove,
+ .remove = ingenic_rng_remove,
.driver = {
.name = "ingenic-rng",
.of_match_table = ingenic_rng_of_match,
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 36c34252b4f6..9e408144a10c 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (IS_ERR(ks_sa_rng->regmap_cfg))
return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ks_sa_rng->clk))
+ return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
@@ -261,7 +265,7 @@ static struct platform_driver ks_sa_rng_driver = {
.of_match_table = ks_sa_rng_dt_match,
},
.probe = ks_sa_rng_probe,
- .remove_new = ks_sa_rng_remove,
+ .remove = ks_sa_rng_remove,
};
module_platform_driver(ks_sa_rng_driver);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index aa993753ab12..5808d09d12c4 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -36,6 +36,7 @@ struct mtk_rng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
+ struct device *dev;
};
static int mtk_rng_init(struct hwrng *rng)
@@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
@@ -97,8 +98,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -112,13 +112,13 @@ static int mtk_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->rng.name = pdev->name;
#ifndef CONFIG_PM
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
#endif
priv->rng.read = mtk_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
@@ -142,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 94ee18a1120a..e3fcb8bcc29b 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -147,33 +147,25 @@ static int mxc_rnga_probe(struct platform_device *pdev)
mxc_rng->rng.data_present = mxc_rnga_data_present;
mxc_rng->rng.data_read = mxc_rnga_data_read;
- mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
+ mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(mxc_rng->clk)) {
dev_err(&pdev->dev, "Could not get rng_clk!\n");
return PTR_ERR(mxc_rng->clk);
}
- err = clk_prepare_enable(mxc_rng->clk);
- if (err)
- return err;
-
mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxc_rng->mem)) {
err = PTR_ERR(mxc_rng->mem);
- goto err_ioremap;
+ return err;
}
err = hwrng_register(&mxc_rng->rng);
if (err) {
dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
- goto err_ioremap;
+ return err;
}
return 0;
-
-err_ioremap:
- clk_disable_unprepare(mxc_rng->clk);
- return err;
}
static void mxc_rnga_remove(struct platform_device *pdev)
@@ -181,8 +173,6 @@ static void mxc_rnga_remove(struct platform_device *pdev)
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
hwrng_unregister(&mxc_rng->rng);
-
- clk_disable_unprepare(mxc_rng->clk);
}
static const struct of_device_id mxc_rnga_of_match[] = {
@@ -198,7 +188,7 @@ static struct platform_driver mxc_rnga_driver = {
.of_match_table = mxc_rnga_of_match,
},
.probe = mxc_rnga_probe,
- .remove_new = mxc_rnga_remove,
+ .remove = mxc_rnga_remove,
};
module_platform_driver(mxc_rnga_driver);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1b49e3a86d57..ea6d5599242f 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -858,7 +858,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove_new = n2rng_remove,
+ .remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 9a870f5dc371..7612f15a261f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -48,7 +48,7 @@
#define HV_RNG_NUM_CONTROL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long sun4v_rng_get_diag_ctl(void);
extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
unsigned long *state,
@@ -147,6 +147,6 @@ struct n2rng {
#define N2RNG_BUSY_LIMIT 100
#define N2RNG_HCHECK_LIMIT 100
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* _N2RNG_H */
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index bce8c4829a1f..40d6e29dea03 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -32,6 +32,7 @@
struct npcm_rng {
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
u32 clkp;
};
@@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
int retval = 0;
int ready;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max) {
if (wait) {
@@ -79,8 +80,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -109,7 +109,7 @@ static int npcm_rng_probe(struct platform_device *pdev)
#endif
priv->rng.name = pdev->name;
priv->rng.read = npcm_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->dev = &pdev->dev;
priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
@@ -176,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
.of_match_table = of_match_ptr(rng_dt_id),
},
.probe = npcm_rng_probe,
- .remove_new = npcm_rng_remove,
+ .remove = npcm_rng_remove,
};
module_platform_driver(npcm_rng_driver);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d4c02e900466..5e8b50f15db7 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -558,10 +558,11 @@ static struct platform_driver omap_rng_driver = {
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
- .remove_new = omap_rng_remove,
+ .remove = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);
MODULE_ALIAS("platform:omap_rng");
MODULE_AUTHOR("Deepak Saxena (and others)");
+MODULE_DESCRIPTION("RNG driver for TI OMAP CPU family");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 18dc46b1b58e..aa71f61c3dc9 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -56,7 +56,6 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
else
r = 4;
- pm_runtime_mark_last_busy(ddata->dev);
pm_runtime_put_autosuspend(ddata->dev);
return r;
@@ -178,4 +177,5 @@ module_platform_driver(omap3_rom_rng_driver);
MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_DESCRIPTION("RNG driver for TI OMAP3 CPU family");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
new file mode 100644
index 000000000000..6e3ed4b85605
--- /dev/null
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs
+ *
+ * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2022, Aurelien Jarno
+ * Copyright (c) 2025, Collabora Ltd.
+ * Authors:
+ * Lin Jinhan <troy.lin@rock-chips.com>
+ * Aurelien Jarno <aurelien@aurel32.net>
+ * Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+#include <linux/clk.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define RK_RNG_AUTOSUSPEND_DELAY 100
+#define RK_RNG_MAX_BYTE 32
+#define RK_RNG_POLL_PERIOD_US 100
+#define RK_RNG_POLL_TIMEOUT_US 10000
+
+/*
+ * The TRNG collects one osc ring output bit every RK_RNG_SAMPLE_CNT clock
+ * cycles. The value is a tradeoff between speed and quality and has been
+ * adjusted to get a quality of ~900 (~87.5% of FIPS 140-2 successes).
+ */
+#define RK_RNG_SAMPLE_CNT 1000
+
+/* number of output bytes after which TRNG v1 implementations are reseeded */
+#define RK_TRNG_V1_AUTO_RESEED_CNT 16000
+
+/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
+#define TRNG_RST_CTL 0x0004
+#define TRNG_RNG_CTL 0x0400
+#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4)
+#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4)
+#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4)
+#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2)
+#define TRNG_RNG_CTL_MASK GENMASK(15, 0)
+#define TRNG_RNG_CTL_ENABLE BIT(1)
+#define TRNG_RNG_CTL_START BIT(0)
+#define TRNG_RNG_SAMPLE_CNT 0x0404
+#define TRNG_RNG_DOUT 0x0410
+
+/*
+ * TRNG V1 register definitions
+ * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3588 SoC
+ */
+#define TRNG_V1_CTRL 0x0000
+#define TRNG_V1_CTRL_NOP 0x00
+#define TRNG_V1_CTRL_RAND 0x01
+#define TRNG_V1_CTRL_SEED 0x02
+
+#define TRNG_V1_STAT 0x0004
+#define TRNG_V1_STAT_SEEDED BIT(9)
+#define TRNG_V1_STAT_GENERATING BIT(30)
+#define TRNG_V1_STAT_RESEEDING BIT(31)
+
+#define TRNG_V1_MODE 0x0008
+#define TRNG_V1_MODE_128_BIT (0x00 << 3)
+#define TRNG_V1_MODE_256_BIT (0x01 << 3)
+
+/* Interrupt Enable register; unused because polling is faster */
+#define TRNG_V1_IE 0x0010
+#define TRNG_V1_IE_GLBL_EN BIT(31)
+#define TRNG_V1_IE_SEED_DONE_EN BIT(1)
+#define TRNG_V1_IE_RAND_RDY_EN BIT(0)
+
+#define TRNG_V1_ISTAT 0x0014
+#define TRNG_V1_ISTAT_RAND_RDY BIT(0)
+
+/* RAND0 ~ RAND7 */
+#define TRNG_V1_RAND0 0x0020
+#define TRNG_V1_RAND7 0x003C
+
+/* Auto Reseed Register */
+#define TRNG_V1_AUTO_RQSTS 0x0060
+
+#define TRNG_V1_VERSION 0x00F0
+#define TRNG_v1_VERSION_CODE 0x46bc
+/* end of TRNG_V1 register definitions */
+
+/*
+ * RKRNG register definitions
+ * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
+ * SoCs. It can either output true randomness (TRNG) or "deterministic"
+ * randomness derived from hashing the true entropy (DRNG). This driver
+ * implementation uses just the true entropy, and leaves stretching the entropy
+ * up to Linux.
+ */
+#define RKRNG_CFG 0x0000
+#define RKRNG_CTRL 0x0010
+#define RKRNG_CTRL_REQ_TRNG BIT(4)
+#define RKRNG_STATE 0x0014
+#define RKRNG_STATE_TRNG_RDY BIT(4)
+#define RKRNG_TRNG_DATA0 0x0050
+#define RKRNG_TRNG_DATA1 0x0054
+#define RKRNG_TRNG_DATA2 0x0058
+#define RKRNG_TRNG_DATA3 0x005C
+#define RKRNG_TRNG_DATA4 0x0060
+#define RKRNG_TRNG_DATA5 0x0064
+#define RKRNG_TRNG_DATA6 0x0068
+#define RKRNG_TRNG_DATA7 0x006C
+#define RKRNG_READ_LEN 32
+
+/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
+static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
+ "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");
+
+struct rk_rng {
+ struct hwrng rng;
+ void __iomem *base;
+ int clk_num;
+ struct clk_bulk_data *clk_bulks;
+ const struct rk_rng_soc_data *soc_data;
+ struct device *dev;
+};
+
+struct rk_rng_soc_data {
+ int (*rk_rng_init)(struct hwrng *rng);
+ int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait);
+ void (*rk_rng_cleanup)(struct hwrng *rng);
+ unsigned short quality;
+ bool reset_optional;
+};
+
+/* The mask in the upper 16 bits determines the bits that are updated */
+static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
+{
+ writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
+}
+
+static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
+{
+ writel(val, rng->base + offset);
+}
+
+static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
+{
+ return readl(rng->base + offset);
+}
+
+static int rk_rng_enable_clks(struct rk_rng *rk_rng)
+{
+ int ret;
+ /* start clocks */
+ ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
+ if (ret < 0) {
+ dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rk3568_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ int ret;
+
+ ret = rk_rng_enable_clks(rk_rng);
+ if (ret < 0)
+ return ret;
+
+ /* set the sample period */
+ writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);
+
+ /* set osc ring speed and enable it */
+ rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
+ TRNG_RNG_CTL_OSC_RING_SPEED_0 |
+ TRNG_RNG_CTL_ENABLE,
+ TRNG_RNG_CTL_MASK);
+
+ return 0;
+}
+
+static void rk3568_rng_cleanup(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ /* stop TRNG */
+ rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);
+
+ /* stop clocks */
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+}
+
+static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
+ u32 reg;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Start collecting random data */
+ rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);
+
+ ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
+ !(reg & TRNG_RNG_CTL_START),
+ RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ goto out;
+
+ /* Read random data stored in the registers */
+ memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
+out:
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static int rk3576_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ return rk_rng_enable_clks(rk_rng);
+}
+
+static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
+ int ret = 0;
+ u32 val;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
+ RKRNG_CTRL);
+
+ if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
+ (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US)) {
+ dev_err(rk_rng->dev, "timed out waiting for data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);
+
+ memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
+
+out:
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static int rk3588_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ u32 version, status, mask, istat;
+ int ret;
+
+ ret = rk_rng_enable_clks(rk_rng);
+ if (ret < 0)
+ return ret;
+
+ version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
+ if (version != TRNG_v1_VERSION_CODE) {
+ dev_err(rk_rng->dev,
+ "wrong trng version, expected = %08x, actual = %08x\n",
+			TRNG_v1_VERSION_CODE, version);
+ ret = -EFAULT;
+ goto err_disable_clk;
+ }
+
+ mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING |
+ TRNG_V1_STAT_RESEEDING;
+ if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
+ (status & mask) == TRNG_V1_STAT_SEEDED,
+ RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) {
+ dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
+ ret = -ETIMEDOUT;
+ goto err_disable_clk;
+ }
+
+	/*
+	 * Clear the ISTAT flag; downstream advises doing this to avoid
+	 * auto-reseeding "on power on".
+	 */
+ istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
+ rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);
+
+ /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */
+ rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);
+
+ return 0;
+err_disable_clk:
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+ return ret;
+}
+
+static void rk3588_rng_cleanup(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+}
+
+static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
+ int ret = 0;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+	/* Clear ISTAT; it is updated even when interrupts are disabled */
+ reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
+ rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
+
+ /* generate 256 bits of random data */
+ rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
+ rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);
+
+ ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
+ (reg & TRNG_V1_ISTAT_RAND_RDY), 0,
+ RK_RNG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ goto out;
+
+ /* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */
+ memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);
+
+out:
+ /* Clear ISTAT */
+ rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
+ /* close the TRNG */
+ rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);
+
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static const struct rk_rng_soc_data rk3568_soc_data = {
+ .rk_rng_init = rk3568_rng_init,
+ .rk_rng_read = rk3568_rng_read,
+ .rk_rng_cleanup = rk3568_rng_cleanup,
+ .quality = 900,
+ .reset_optional = false,
+};
+
+static const struct rk_rng_soc_data rk3576_soc_data = {
+ .rk_rng_init = rk3576_rng_init,
+ .rk_rng_read = rk3576_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
+static const struct rk_rng_soc_data rk3588_soc_data = {
+ .rk_rng_init = rk3588_rng_init,
+ .rk_rng_read = rk3588_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
+static int rk_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rst;
+ struct rk_rng *rk_rng;
+ int ret;
+
+ rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
+ if (!rk_rng)
+ return -ENOMEM;
+
+ rk_rng->soc_data = of_device_get_match_data(dev);
+ rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rk_rng->base))
+ return PTR_ERR(rk_rng->base);
+
+ rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
+ if (rk_rng->clk_num < 0)
+ return dev_err_probe(dev, rk_rng->clk_num,
+ "Failed to get clks property\n");
+
+ if (rk_rng->soc_data->reset_optional)
+ rst = devm_reset_control_array_get_optional_exclusive(dev);
+ else
+ rst = devm_reset_control_array_get_exclusive(dev);
+
+ if (rst) {
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
+
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, rk_rng);
+
+ rk_rng->rng.name = dev_driver_string(dev);
+ if (!IS_ENABLED(CONFIG_PM)) {
+ rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
+ rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
+ }
+ rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
+ rk_rng->dev = dev;
+ rk_rng->rng.quality = rk_rng->soc_data->quality;
+
+ pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");
+
+ ret = devm_hwrng_register(dev, &rk_rng->rng);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n");
+
+ return 0;
+}
+
+static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
+{
+ struct rk_rng *rk_rng = dev_get_drvdata(dev);
+
+ rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);
+
+ return 0;
+}
+
+static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
+{
+ struct rk_rng *rk_rng = dev_get_drvdata(dev);
+
+ return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);
+}
+
+static const struct dev_pm_ops rk_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend,
+ rk_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id rk_rng_dt_match[] = {
+ { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
+ { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
+ { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, rk_rng_dt_match);
+
+static struct platform_driver rk_rng_driver = {
+ .driver = {
+ .name = "rockchip-rng",
+ .pm = &rk_rng_pm_ops,
+ .of_match_table = rk_rng_dt_match,
+ },
+ .probe = rk_rng_probe,
+};
+
+module_platform_driver(rk_rng_driver);
+
+MODULE_DESCRIPTION("Rockchip True Random Number Generator driver");
+MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
+MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index d27e32e9bfee..3024d5e9fd61 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -9,8 +9,7 @@
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*/
-#define KMSG_COMPONENT "trng"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "trng: " fmt
#include <linux/hw_random.h>
#include <linux/kernel.h>
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 0e903d6e22e3..9a8c00586ab0 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
@@ -49,6 +50,7 @@
struct stm32_rng_data {
uint max_clock_rate;
+ uint nb_clock;
u32 cr;
u32 nscr;
u32 htcr;
@@ -70,8 +72,9 @@ struct stm32_rng_config {
struct stm32_rng_private {
struct hwrng rng;
+ struct device *dev;
void __iomem *base;
- struct clk *clk;
+ struct clk_bulk_data *clk_bulk;
struct reset_control *rst;
struct stm32_rng_config pm_conf;
const struct stm32_rng_data *data;
@@ -99,7 +102,7 @@ struct stm32_rng_private {
*/
static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv)
{
- struct device *dev = (struct device *)priv->rng.priv;
+ struct device *dev = priv->dev;
u32 sr = readl_relaxed(priv->base + RNG_SR);
u32 cr = readl_relaxed(priv->base + RNG_CR);
int err;
@@ -171,7 +174,7 @@ static int stm32_rng_conceal_seed_error(struct hwrng *rng)
{
struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
- dev_dbg((struct device *)priv->rng.priv, "Concealing seed error\n");
+ dev_dbg(priv->dev, "Concealing seed error\n");
if (priv->data->has_cond_reset)
return stm32_rng_conceal_seed_error_cond_reset(priv);
@@ -187,7 +190,9 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
int retval = 0, err = 0;
u32 sr;
- pm_runtime_get_sync((struct device *) priv->rng.priv);
+ retval = pm_runtime_resume_and_get(priv->dev);
+ if (retval)
+ return retval;
if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
stm32_rng_conceal_seed_error(rng);
@@ -204,8 +209,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
sr, sr,
10, 50000);
if (err) {
- dev_err((struct device *)priv->rng.priv,
- "%s: timeout %x!\n", __func__, sr);
+ dev_err(priv->dev, "%s: timeout %x!\n", __func__, sr);
break;
}
} else if (!sr) {
@@ -218,8 +222,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
err = stm32_rng_conceal_seed_error(rng);
i++;
if (err && i > RNG_NB_RECOVER_TRIES) {
- dev_err((struct device *)priv->rng.priv,
- "Couldn't recover from seed error\n");
+ dev_err(priv->dev, "Couldn't recover from seed error\n");
retval = -ENOTRECOVERABLE;
goto exit_rpm;
}
@@ -237,8 +240,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
err = stm32_rng_conceal_seed_error(rng);
i++;
if (err && i > RNG_NB_RECOVER_TRIES) {
- dev_err((struct device *)priv->rng.priv,
- "Couldn't recover from seed error");
+			dev_err(priv->dev, "Couldn't recover from seed error\n");
retval = -ENOTRECOVERABLE;
goto exit_rpm;
}
@@ -253,8 +255,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
exit_rpm:
- pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -266,7 +267,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
unsigned long clock_rate = 0;
uint clock_div = 0;
- clock_rate = clk_get_rate(priv->clk);
+ clock_rate = clk_get_rate(priv->clk_bulk[0].clk);
/*
* Get the exponent to apply on the CLKDIV field in RNG_CR register
@@ -276,7 +277,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
clock_div++;
- pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div);
+ pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);
return clock_div;
}
@@ -288,7 +289,7 @@ static int stm32_rng_init(struct hwrng *rng)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -328,9 +329,8 @@ static int stm32_rng_init(struct hwrng *rng)
(!(reg & RNG_CR_CONDRST)),
10, 50000);
if (err) {
- clk_disable_unprepare(priv->clk);
- dev_err((struct device *)priv->rng.priv,
- "%s: timeout %x!\n", __func__, reg);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
+ dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
return -EINVAL;
}
} else {
@@ -357,13 +357,13 @@ static int stm32_rng_init(struct hwrng *rng)
reg & RNG_SR_DRDY,
10, 100000);
if (err || (reg & ~RNG_SR_DRDY)) {
- clk_disable_unprepare(priv->clk);
- dev_err((struct device *)priv->rng.priv,
- "%s: timeout:%x SR: %x!\n", __func__, err, reg);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
+ dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);
+
return -EINVAL;
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -381,7 +381,8 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
reg = readl_relaxed(priv->base + RNG_CR);
reg &= ~RNG_CR_RNGEN;
writel_relaxed(reg, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -391,7 +392,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
struct stm32_rng_private *priv = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -405,7 +406,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -416,7 +417,7 @@ static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -436,7 +437,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -464,9 +465,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
reg & ~RNG_CR_CONDRST, 10, 100000);
if (err) {
- clk_disable_unprepare(priv->clk);
- dev_err((struct device *)priv->rng.priv,
- "%s: timeout:%x CR: %x!\n", __func__, err, reg);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
+ dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
return -EINVAL;
}
} else {
@@ -475,7 +475,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -487,9 +487,19 @@ static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
stm32_rng_resume)
};
+static const struct stm32_rng_data stm32mp25_rng_data = {
+ .has_cond_reset = true,
+ .max_clock_rate = 48000000,
+ .nb_clock = 2,
+ .cr = 0x00F00D00,
+ .nscr = 0x2B5BB,
+ .htcr = 0x969D,
+};
+
static const struct stm32_rng_data stm32mp13_rng_data = {
.has_cond_reset = true,
.max_clock_rate = 48000000,
+ .nb_clock = 1,
.cr = 0x00F00D00,
.nscr = 0x2B5BB,
.htcr = 0x969D,
@@ -497,11 +507,16 @@ static const struct stm32_rng_data stm32mp13_rng_data = {
static const struct stm32_rng_data stm32_rng_data = {
.has_cond_reset = false,
- .max_clock_rate = 3000000,
+ .max_clock_rate = 48000000,
+ .nb_clock = 1,
};
static const struct of_device_id stm32_rng_match[] = {
{
+ .compatible = "st,stm32mp25-rng",
+ .data = &stm32mp25_rng_data,
+ },
+ {
.compatible = "st,stm32mp13-rng",
.data = &stm32mp13_rng_data,
},
@@ -519,8 +534,9 @@ static int stm32_rng_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct stm32_rng_private *priv;
struct resource *res;
+ int ret;
- priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -528,10 +544,6 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&ofdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -541,6 +553,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->ced = of_property_read_bool(np, "clock-error-detect");
priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf");
+ priv->dev = dev;
priv->data = of_device_get_match_data(dev);
if (!priv->data)
@@ -551,9 +564,30 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->rng.name = dev_driver_string(dev);
priv->rng.init = stm32_rng_init;
priv->rng.read = stm32_rng_read;
- priv->rng.priv = (unsigned long) dev;
priv->rng.quality = 900;
+ if (!priv->data->nb_clock || priv->data->nb_clock > 2)
+ return -EINVAL;
+
+ ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
+ if (ret != priv->data->nb_clock)
+ return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);
+
+ if (priv->data->nb_clock == 2) {
+ const char *id = priv->clk_bulk[1].id;
+ struct clk *clk = priv->clk_bulk[1].clk;
+
+ if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
+ return dev_err_probe(dev, -EINVAL, "Missing clock name\n");
+
+ if (strcmp(priv->clk_bulk[0].id, "core")) {
+ priv->clk_bulk[1].id = priv->clk_bulk[0].id;
+ priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
+ priv->clk_bulk[0].id = id;
+ priv->clk_bulk[0].clk = clk;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
@@ -568,7 +602,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
- .remove_new = stm32_rng_remove,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
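
The STM32 hunks switch from a single struct clk to the clk_bulk API so the
stm32mp25 variant (two clocks) and the older parts (one clock) share one code
path; the probe code above additionally reorders the entries so the "core"
clock ends up at index 0, where stm32_rng_clock_freq_restrain() expects it.
A sketch of the acquisition pattern, with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>

static int demo_enable_rng_clocks(struct device *dev, int nb_clock,
				  struct clk_bulk_data **clks)
{
	int n = devm_clk_bulk_get_all(dev, clks);

	if (n < 0)
		return n;		/* clock lookup failed */
	if (n != nb_clock)
		return -EINVAL;		/* DT must match the SoC data */

	/* from here on, all clocks are enabled and disabled as one unit */
	return clk_bulk_prepare_enable(n, *clks);
}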
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 65b8260339f5..e61f06393209 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -150,10 +150,9 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.quality = pdata->quality;
}
- priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+ priv->period = us_to_ktime(period);
init_completion(&priv->completion);
- hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->timer.function = timeriomem_rng_trigger;
+ hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
@@ -193,7 +192,7 @@ static struct platform_driver timeriomem_rng_driver = {
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
- .remove_new = timeriomem_rng_remove,
+ .remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
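
Two mechanical conversions in the timeriomem hunk: us_to_ktime(period) replaces
the open-coded ns_to_ktime(period * NSEC_PER_USEC), and hrtimer_setup()
replaces the hrtimer_init()-plus-function-assignment pair. A sketch of the new
form, with a placeholder callback:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_expire(struct hrtimer *t)
{
	return HRTIMER_NORESTART;	/* one-shot */
}

static ktime_t demo_setup(struct hrtimer *t, unsigned long period_us)
{
	/* the callback is bound at setup time, no separate assignment */
	hrtimer_setup(t, demo_expire, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	return us_to_ktime(period_us);
}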
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 7a4b45393acb..dd998f4fe4f2 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -245,7 +245,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_rng_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtrng_probe,
.remove = virtrng_remove,
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 642d13519464..709a36507145 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -88,12 +88,12 @@ struct xgene_rng_dev {
static void xgene_rng_expired_timer(struct timer_list *t)
{
- struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
+ struct xgene_rng_dev *ctx = timer_container_of(ctx, t, failure_timer);
/* Clear failure counter as timer expired */
disable_irq(ctx->irq);
ctx->failure_cnt = 0;
- del_timer(&ctx->failure_timer);
+ timer_delete(&ctx->failure_timer);
enable_irq(ctx->irq);
}
@@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
static struct platform_driver xgene_rng_driver = {
.probe = xgene_rng_probe,
- .remove_new = xgene_rng_remove,
+ .remove = xgene_rng_remove,
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,
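
The bt-bmc and xgene hunks pick up the timer API renames: from_timer() is now
timer_container_of(), del_timer() is timer_delete(), and del_timer_sync() is
timer_delete_sync(); behaviour is unchanged. A sketch with a hypothetical
context structure:

#include <linux/timer.h>

struct demo_ctx {
	struct timer_list failure_timer;
	int failure_cnt;
};

/* same shape as xgene_rng_expired_timer() above, new helper names */
static void demo_expired(struct timer_list *t)
{
	struct demo_ctx *ctx = timer_container_of(ctx, t, failure_timer);

	ctx->failure_cnt = 0;
	timer_delete(&ctx->failure_timer);
}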
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f4adc6feb3b2..92bed266d07c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -84,6 +84,13 @@ config IPMI_IPMB
bus, and it also supports direct messaging on the bus using
IPMB direct messages. This module requires I2C support.
+config IPMI_LS2K
+ bool 'Loongson-2K IPMI interface'
+ depends on LOONGARCH
+ select MFD_LS2K_BMC_CORE
+ help
+ Provides a driver for Loongson-2K IPMI interfaces.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index cb6138b8ded9..4ea450a82242 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -5,13 +5,11 @@
ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \
- ipmi_si_port_io.o ipmi_si_mem_io.o
-ifdef CONFIG_PCI
-ipmi_si-y += ipmi_si_pci.o
-endif
-ifdef CONFIG_PARISC
-ipmi_si-y += ipmi_si_parisc.o
-endif
+ ipmi_si_mem_io.o
+ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o
+ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o
+ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o
+ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 7450904e330a..a179d4797011 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -347,7 +347,7 @@ static const struct file_operations bt_bmc_fops = {
static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
+ struct bt_bmc *bt_bmc = timer_container_of(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
@@ -459,14 +459,13 @@ static int bt_bmc_probe(struct platform_device *pdev)
return 0;
}
-static int bt_bmc_remove(struct platform_device *pdev)
+static void bt_bmc_remove(struct platform_device *pdev)
{
struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
misc_deregister(&bt_bmc->miscdev);
if (bt_bmc->irq < 0)
- del_timer_sync(&bt_bmc->poll_timer);
- return 0;
+ timer_delete_sync(&bt_bmc->poll_timer);
}
static const struct of_device_id bt_bmc_match[] = {
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 49100845fcb7..ee2bdc7ed0da 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
"%s%d", "ipmb-",
client->adapter->nr);
+ if (!ipmb_dev->miscdev.name)
+ return -ENOMEM;
+
ipmb_dev->miscdev.fops = &ipmb_fops;
ipmb_dev->miscdev.parent = &client->dev;
ret = misc_register(&ipmb_dev->miscdev);
@@ -350,16 +353,18 @@ static void ipmb_remove(struct i2c_client *client)
}
static const struct i2c_device_id ipmb_id[] = {
- { "ipmb-dev", 0 },
- {},
+ { "ipmb-dev" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ipmb_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_ipmb_id[] = {
{ "IPMB0001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+#endif
static struct i2c_driver ipmb_driver = {
.driver = {
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 332082e02ea5..e6ba35b71f10 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -122,12 +122,9 @@ out:
static int ipmi_release(struct inode *inode, struct file *file)
{
struct ipmi_file_private *priv = file->private_data;
- int rv;
struct ipmi_recv_msg *msg, *next;
- rv = ipmi_destroy_user(priv->user);
- if (rv)
- return rv;
+ ipmi_destroy_user(priv->user);
list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
ipmi_free_recv_msg(msg);
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 4e335832fc26..3a51e58b2487 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info)
ipmi_ipmb_stop_thread(iidev);
}
-static void ipmi_ipmb_sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_dev *iidev = send_info;
unsigned long flags;
@@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info,
spin_unlock_irqrestore(&iidev->lock, flags);
up(&iidev->wake_thread);
+ return IPMI_CC_NO_ERROR;
}
static void ipmi_ipmb_request_events(void *send_info)
@@ -561,8 +561,8 @@ MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
#endif
static const struct i2c_device_id ipmi_ipmb_id[] = {
- { DEVICE_NAME, 0 },
- {},
+ { DEVICE_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
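
The ipmi_ipmb hunk above tracks an interface change in the SMI handler API:
the sender callback now returns an IPMI completion code instead of void, so a
lower layer can fail a send synchronously. A sketch of the new signature; the
body is elided and the function name is illustrative:

#include <linux/ipmi_msgdefs.h>
#include <linux/ipmi_smi.h>

static int demo_sender(void *send_info, struct ipmi_smi_msg *msg)
{
	/* queue msg for the transmit thread, as ipmi_ipmb_sender() does */
	return IPMI_CC_NO_ERROR;	/* 0x00, success */
}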
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index ecfcb50302f6..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
- struct si_sm_io *io, enum kcs_states state)
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
{
- kcs->state = state;
+ kcs->state = KCS_IDLE;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
return 2;
}
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
-{
- return init_kcs_data_with_state(kcs, io, KCS_IDLE);
-}
-
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if (kcs->state != KCS_IDLE) {
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ init_kcs_data(kcs, kcs->io);
return SI_SM_HOSED;
}
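
The ipmi_kcs_sm changes fold init_kcs_data_with_state() back into
init_kcs_data(), which always resets to KCS_IDLE, and to match they let a new
transaction start from KCS_HOSED as well as KCS_IDLE, since a hosed machine is
re-initialised when SI_SM_HOSED is reported. The relaxed entry check behaves
like this standalone model (demo names illustrative):

#include <linux/types.h>

enum demo_kcs_state { DEMO_KCS_IDLE, DEMO_KCS_HOSED, DEMO_KCS_BUSY };

static bool demo_can_start_transaction(enum demo_kcs_state s)
{
	/* mirrors: !((state != KCS_IDLE) && (state != KCS_HOSED)) */
	return s == DEMO_KCS_IDLE || s == DEMO_KCS_HOSED;
}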
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b0eedc4595b3..3f48fc6ab596 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -27,7 +27,6 @@
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -39,17 +38,22 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user);
static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
+static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
+static struct timer_list ipmi_timer;
+
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -180,14 +184,8 @@ MODULE_PARM_DESC(max_msgs_per_user,
struct ipmi_user {
struct list_head link;
- /*
- * Set to NULL when the user is destroyed, a pointer to myself
- * so srcu_dereference can be used on it.
- */
- struct ipmi_user *self;
- struct srcu_struct release_barrier;
-
struct kref refcount;
+ refcount_t destroyed;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
@@ -200,30 +198,8 @@ struct ipmi_user {
bool gets_events;
atomic_t nr_msgs;
-
- /* Free must run in process context for RCU cleanup. */
- struct work_struct remove_work;
};
-static struct workqueue_struct *remove_work_wq;
-
-static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
- __acquires(user->release_barrier)
-{
- struct ipmi_user *ruser;
-
- *index = srcu_read_lock(&user->release_barrier);
- ruser = srcu_dereference(user->self, &user->release_barrier);
- if (!ruser)
- srcu_read_unlock(&user->release_barrier, *index);
- return ruser;
-}
-
-static void release_ipmi_user(struct ipmi_user *user, int index)
-{
- srcu_read_unlock(&user->release_barrier, index);
-}
-
struct cmd_rcvr {
struct list_head link;
@@ -327,6 +303,8 @@ struct bmc_device {
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static struct workqueue_struct *bmc_remove_work_wq;
+
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
@@ -451,14 +429,14 @@ struct ipmi_smi {
struct list_head link;
/*
- * The list of upper layers that are using me. seq_lock write
- * protects this. Read protection is with srcu.
+ * The list of upper layers that are using me.
*/
struct list_head users;
- struct srcu_struct users_srcu;
+ struct mutex users_mutex;
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
+ struct device_attribute maintenance_mode_devattr;
/* Used for wake ups at startup. */
@@ -491,20 +469,27 @@ struct ipmi_smi {
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
- spinlock_t seq_lock;
+ struct mutex seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
/*
- * Messages queued for delivery. If delivery fails (out of memory
- * for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The tasklet is for handling received
- * messages directly from the handler.
+	 * Messages queued for delivery to the user.
+ */
+ struct mutex user_msgs_mutex;
+ struct list_head user_msgs;
+
+ /*
+ * Messages queued for processing. If processing fails (out
+	 * of memory for instance), they will stay in here to be
+ * processed later in a periodic timer interrupt. The
+ * workqueue is for handling received messages directly from
+ * the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct tasklet_struct recv_tasklet;
+ struct work_struct smi_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -522,10 +507,9 @@ struct ipmi_smi {
* Events that were queues because no one was there to receive
* them.
*/
- spinlock_t events_lock; /* For dealing with event stuff. */
+ struct mutex events_mutex; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- char delivering_events;
char event_msg_printed;
/* How many users are waiting for events? */
@@ -560,7 +544,11 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- bool maintenance_mode_enable;
+
+#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
+#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
+#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
+ int maintenance_mode_state;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -611,8 +599,31 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num);
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id, bool rescan);
+static void free_ipmi_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ struct module *owner;
+
+ owner = user->intf->owner;
+ kref_put(&user->intf->refcount, intf_free);
+ module_put(owner);
+ vfree(user);
+}
+
+static void release_ipmi_user(struct ipmi_user *user)
+{
+ kref_put(&user->refcount, free_ipmi_user);
+}
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
+{
+ if (!kref_get_unless_zero(&user->refcount))
+ return NULL;
+ return user;
+}
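
The SRCU-based acquire/release pair deleted earlier in this file is replaced by
plain reference counting: a lookup either pins the user with
kref_get_unless_zero() or observes that it is already being torn down. A
self-contained sketch of the scheme, with illustrative demo_* names:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_user {
	struct kref refcount;
};

static void demo_user_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_user, refcount));
}

static struct demo_user *demo_user_get(struct demo_user *u)
{
	/* fails (returns NULL) once the last reference is being dropped */
	return kref_get_unless_zero(&u->refcount) ? u : NULL;
}

static void demo_user_put(struct demo_user *u)
{
	kref_put(&u->refcount, demo_user_release);
}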
/*
* The driver model view of the IPMI messaging driver.
@@ -630,9 +641,6 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-#define ipmi_interfaces_mutex_held() \
- lockdep_is_held(&ipmi_interfaces_mutex)
-static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -698,27 +706,20 @@ static void free_smi_msg_list(struct list_head *q)
}
}
-static void clean_up_interface_data(struct ipmi_smi *intf)
+static void intf_free(struct kref *ref)
{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
int i;
struct cmd_rcvr *rcvr, *rcvr2;
- struct list_head list;
-
- tasklet_kill(&intf->recv_tasklet);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
- * interface and wait for RCU to know that none are in use.
+ * interface. No need for locks, this is single-threaded.
*/
- mutex_lock(&intf->cmd_rcvrs_mutex);
- INIT_LIST_HEAD(&list);
- list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
- mutex_unlock(&intf->cmd_rcvrs_mutex);
-
- list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
@@ -726,20 +727,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
-}
-static void intf_free(struct kref *ref)
-{
- struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
-
- clean_up_interface_data(intf);
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index, rv;
+ unsigned int count = 0, i;
+ int *interfaces = NULL;
+ struct device **devices = NULL;
+ int rv = 0;
/*
* Make sure the driver is actually initialized, this handles
@@ -753,20 +751,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
- lockdep_is_held(&smi_watchers_mutex)) {
- int intf_num = READ_ONCE(intf->intf_num);
+ /*
+ * Build an array of ipmi interfaces and fill it in, and
+ * another array of the devices. We can't call the callback
+ * with ipmi_interfaces_mutex held. smi_watchers_mutex will
+ * keep things in order for the user.
+ */
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link)
+ count++;
+ if (count > 0) {
+ interfaces = kmalloc_array(count, sizeof(*interfaces),
+ GFP_KERNEL);
+ if (!interfaces) {
+ rv = -ENOMEM;
+ } else {
+ devices = kmalloc_array(count, sizeof(*devices),
+ GFP_KERNEL);
+ if (!devices) {
+ kfree(interfaces);
+ interfaces = NULL;
+ rv = -ENOMEM;
+ }
+ }
+ count = 0;
+ }
+ if (interfaces) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ int intf_num = READ_ONCE(intf->intf_num);
- if (intf_num == -1)
- continue;
- watcher->new_smi(intf_num, intf->si_dev);
+ if (intf_num == -1)
+ continue;
+ devices[count] = intf->si_dev;
+ interfaces[count++] = intf_num;
+ }
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ if (interfaces) {
+ for (i = 0; i < count; i++)
+ watcher->new_smi(interfaces[i], devices[i]);
+ kfree(interfaces);
+ kfree(devices);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
@@ -779,22 +810,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-/*
- * Must be called with smi_watchers_mutex held.
- */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- mutex_unlock(&smi_watchers_mutex);
}
static int
@@ -939,20 +965,15 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
- atomic_dec(&msg->user->nr_msgs);
} else {
- int index;
- struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
-
- if (user) {
- atomic_dec(&user->nr_msgs);
- user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(user, index);
- } else {
- /* User went away, give up. */
- ipmi_free_recv_msg(msg);
- rv = -EINVAL;
- }
+ /*
+ * Deliver it in smi_work. The message will hold a
+ * refcount to the user.
+ */
+ mutex_lock(&intf->user_msgs_mutex);
+ list_add_tail(&msg->link, &intf->user_msgs);
+ mutex_unlock(&intf->user_msgs_mutex);
+ queue_work(system_wq, &intf->smi_work);
}
return rv;
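
deliver_response() above no longer invokes the user's receive handler directly
(that direct call was what required SRCU protection); the message is instead
appended to a mutex-protected list and a work item delivers it from process
context. The queuing side, sketched standalone with illustrative names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_intf {
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;
	struct work_struct smi_work;
};

static void demo_queue_for_user(struct demo_intf *intf, struct list_head *msg)
{
	mutex_lock(&intf->user_msgs_mutex);
	list_add_tail(msg, &intf->user_msgs);
	mutex_unlock(&intf->user_msgs_mutex);
	queue_work(system_wq, &intf->smi_work);	/* deliver in process context */
}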
@@ -1104,12 +1125,11 @@ static int intf_find_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
- unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
@@ -1122,7 +1142,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
rv = 0;
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1133,14 +1153,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1151,7 +1170,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
ent->timeout = ent->orig_timeout;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1162,7 +1181,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
unsigned int err)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
@@ -1170,7 +1188,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1184,7 +1202,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
msg = ent->recv_msg;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
if (msg)
deliver_err_response(intf, msg, err);
@@ -1192,23 +1210,13 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
-static void free_user_work(struct work_struct *work)
-{
- struct ipmi_user *user = container_of(work, struct ipmi_user,
- remove_work);
-
- cleanup_srcu_struct(&user->release_barrier);
- vfree(user);
-}
-
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
- unsigned long flags;
- struct ipmi_user *new_user;
- int rv, index;
+ struct ipmi_user *new_user = NULL;
+ int rv = 0;
struct ipmi_smi *intf;
/*
@@ -1230,30 +1238,31 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = vzalloc(sizeof(*new_user));
- if (!new_user)
- return -ENOMEM;
-
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
- goto out_kfree;
+ goto out_unlock;
found:
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
if (atomic_add_return(1, &intf->nr_users) > max_users) {
rv = -EBUSY;
goto out_kfree;
}
- INIT_WORK(&new_user->remove_work, free_user_work);
-
- rv = init_srcu_struct(&new_user->release_barrier);
- if (rv)
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user) {
+ rv = -ENOMEM;
goto out_kfree;
+ }
if (!try_module_get(intf->owner)) {
rv = -ENODEV;
@@ -1265,86 +1274,68 @@ int ipmi_create_user(unsigned int if_num,
atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
+ refcount_set(&new_user->destroyed, 1);
+ kref_get(&new_user->refcount); /* Destroy owns a refcount. */
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
- rcu_assign_pointer(new_user->self, new_user);
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_add_rcu(&new_user->link, &intf->users);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_lock(&intf->users_mutex);
+ mutex_lock(&intf->seq_lock);
+ list_add(&new_user->link, &intf->users);
+ mutex_unlock(&intf->seq_lock);
+ mutex_unlock(&intf->users_mutex);
+
if (handler->ipmi_watchdog_pretimeout)
/* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- *user = new_user;
- return 0;
out_kfree:
- atomic_dec(&intf->nr_users);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- vfree(new_user);
+ if (rv) {
+ atomic_dec(&intf->nr_users);
+ vfree(new_user);
+ } else {
+ *user = new_user;
+ }
+out_unlock:
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
- int rv, index;
+ int rv = -EINVAL;
struct ipmi_smi *intf;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (intf->intf_num == if_num)
- goto found;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num) {
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ break;
+ }
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
-
- /* Not found, return an error */
- return -EINVAL;
-
-found:
- if (!intf->handlers->get_smi_info)
- rv = -ENOTTY;
- else
- rv = intf->handlers->get_smi_info(intf->send_info, data);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
-static void free_user(struct kref *ref)
-{
- struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-
- /* SRCU cleanup must happen in task context. */
- queue_work(remove_work_wq, &user->remove_work);
-}
-
+/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
- unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- struct module *owner;
+ struct ipmi_recv_msg *msg, *msg2;
- if (!acquire_ipmi_user(user, &i)) {
- /*
- * The user has already been cleaned up, just make sure
- * nothing is using it and return.
- */
- synchronize_srcu(&user->release_barrier);
+ if (!refcount_dec_if_one(&user->destroyed))
return;
- }
-
- rcu_assign_pointer(user->self, NULL);
- release_ipmi_user(user, i);
-
- synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
@@ -1355,11 +1346,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
if (user->gets_events)
atomic_dec(&intf->event_waiters);
- /* Remove the user from the interface's sequence table. */
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_del_rcu(&user->link);
+ /* Remove the user from the interface's list and sequence table. */
+ list_del(&user->link);
atomic_dec(&intf->nr_users);
+ mutex_lock(&intf->seq_lock);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1368,13 +1359,13 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
- * synchronize_srcu()) then free everything in that list.
+ * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
@@ -1386,25 +1377,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
- synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
- owner = intf->owner;
- kref_put(&intf->refcount, intf_free);
- module_put(owner);
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ if (msg->user != user)
+ continue;
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ release_ipmi_user(user);
}
-int ipmi_destroy_user(struct ipmi_user *user)
+void ipmi_destroy_user(struct ipmi_user *user)
{
- _ipmi_destroy_user(user);
+ struct ipmi_smi *intf = user->intf;
- kref_put(&user->refcount, free_user);
+ mutex_lock(&intf->users_mutex);
+ _ipmi_destroy_user(user);
+ mutex_unlock(&intf->users_mutex);
- return 0;
+ kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);
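The create/destroy pairing above implements destroy-once semantics: a new user starts with destroyed == 1 and an extra kref owned by destroy, and whichever path wins refcount_dec_if_one() performs teardown exactly once. A condensed sketch of the idiom (generic names; this is the shape, not the driver's exact code):

#include <linux/kref.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        struct kref refcount;
        refcount_t destroyed;           /* 1 until destroy runs, then 0 */
};

static void obj_release(struct kref *ref)
{
        kfree(container_of(ref, struct obj, refcount));
}

static struct obj *obj_create(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (!o)
                return NULL;
        kref_init(&o->refcount);        /* caller's reference */
        refcount_set(&o->destroyed, 1);
        kref_get(&o->refcount);         /* destroy owns this one */
        return o;
}

static void obj_destroy(struct obj *o)
{
        /* Only the first caller sees destroyed drop from 1 to 0. */
        if (refcount_dec_if_one(&o->destroyed)) {
                /* ... teardown that must run exactly once ... */
                kref_put(&o->refcount, obj_release);    /* destroy's ref */
        }
        kref_put(&o->refcount, obj_release);            /* caller's ref */
}

Concurrent or repeated destroy calls are then harmless: a loser only drops its own reference, and the memory goes away when the last outstanding message or accessor drops the final one.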
@@ -1413,9 +1412,9 @@ int ipmi_get_version(struct ipmi_user *user,
unsigned char *minor)
{
struct ipmi_device_id id;
- int rv, index;
+ int rv;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1424,7 +1423,7 @@ int ipmi_get_version(struct ipmi_user *user,
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
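After the SRCU removal, every accessor follows the same acquire/use/release shape seen in ipmi_get_version() above, with no srcu index to thread through. One plausible reading of what the pair reduces to, inferred from the call sites rather than quoted from the driver, is a refcount guard that refuses to pin a user whose teardown has begun (reusing struct obj and obj_release() from the destroy-once sketch above):

/* Sketch only: pin an object unless destruction already started. */
static struct obj *obj_acquire(struct obj *o)
{
        if (!refcount_read(&o->destroyed))
                return NULL;            /* being torn down */
        kref_get(&o->refcount);
        return o;
}

static void obj_put(struct obj *o)
{
        kref_put(&o->refcount, obj_release);
}

The kref alone keeps the memory alive, so the destroyed check only has to be best-effort gating of new work; that is also why smi_work() can test refcount_read(&user->destroyed) before invoking the user's handler.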
@@ -1434,9 +1433,9 @@ int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1446,7 +1445,7 @@ int ipmi_set_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1456,9 +1455,9 @@ int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1468,7 +1467,7 @@ int ipmi_get_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1478,9 +1477,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1490,7 +1489,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1500,9 +1499,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1512,7 +1511,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1520,17 +1519,17 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
- int mode, index;
+ int mode;
unsigned long flags;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return mode;
}
@@ -1539,17 +1538,24 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
+ /*
+ * Lower level drivers only care about firmware mode
+ * as it affects their timing. They don't care about
+ * reset, which disables all commands for a while.
+ */
intf->handlers->set_maintenance_mode(
- intf->send_info, intf->maintenance_mode_enable);
+ intf->send_info,
+ (intf->maintenance_mode_state ==
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
- int rv = 0, index;
+ int rv = 0;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1557,16 +1563,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode_enable
- = (intf->auto_maintenance_timeout > 0);
+ /* Just leave it alone. */
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode_enable = true;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
break;
default:
@@ -1579,7 +1586,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
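The mode values now map onto an ordered state, so comparisons like maintenance_mode_state < newst (used later in i_ipmi_req_sysintf()) let a firmware request escalate to reset but never silently downgrade it. A toy rendering of that ordering rule, with assumed enum names:

/* Ordered: each state is strictly stronger than the previous one. */
enum mm_state {
        MM_STATE_OFF = 0,       /* normal operation */
        MM_STATE_FIRMWARE,      /* firmware transfer: timing changes */
        MM_STATE_RESET,         /* reset: commands disabled for a while */
};

static void mm_escalate(enum mm_state *cur, enum mm_state newst)
{
        if (*cur < newst)       /* escalate only, never weaken */
                *cur = newst;
}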
@@ -1587,19 +1594,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
- unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
- int index;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
if (user->gets_events == val)
goto out;
@@ -1612,13 +1617,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
atomic_dec(&intf->event_waiters);
}
- if (intf->delivering_events)
- /*
- * Another thread is delivering events for this, so
- * let it handle any new events.
- */
- goto out;
-
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1629,22 +1627,15 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
intf->event_msg_printed = 0;
}
- intf->delivering_events = 1;
- spin_unlock_irqrestore(&intf->events_lock, flags);
-
list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
+ ipmi_set_recv_msg_user(msg, user);
deliver_local_response(intf, msg);
}
-
- spin_lock_irqsave(&intf->events_lock, flags);
- intf->delivering_events = 0;
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
- release_ipmi_user(user, index);
+ mutex_unlock(&intf->events_mutex);
+ release_ipmi_user(user);
return 0;
}
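Because events_mutex is a sleepable mutex rather than the old spinlock, ipmi_set_gets_events() can now deliver while still holding it, which is what made the delivering_events flag dance removable. The classic alternative, splicing the queue to a local list and delivering unlocked, looks roughly like this (generic sketch, not the driver's code):

#include <linux/list.h>
#include <linux/mutex.h>

/* Move the whole pending queue to a local list, then deliver unlocked. */
static void drain_queue(struct list_head *pending, struct mutex *lock,
                        void (*deliver)(struct list_head *node))
{
        LIST_HEAD(local);
        struct list_head *n, *tmp;

        mutex_lock(lock);
        list_splice_init(pending, &local);
        mutex_unlock(lock);

        list_for_each_safe(n, tmp, &local) {
                list_del(n);
                deliver(n);
        }
}

Delivering under the mutex instead, as the rewrite does, trades a little hold time for a guarantee that a concurrent ipmi_set_gets_events(false) cannot interleave with the deliveries.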
@@ -1689,9 +1680,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
- int rv = 0, index;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1721,7 +1712,7 @@ out_unlock:
if (rv)
kfree(rcvr);
out_release:
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1735,9 +1726,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- int i, rv = -ENOENT, index;
+ int i, rv = -ENOENT;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1760,7 +1751,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
while (rcvrs) {
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
rcvr = rcvrs;
@@ -1884,13 +1875,12 @@ static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
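run_to_completion flips at panic time while other CPUs may be mid-send, so smi_send() snapshots it once with READ_ONCE() and uses the same value for both the lock and the unlock decision; re-reading the flag could pair a taken lock with a skipped unlock. In miniature (illustrative sketch):

#include <linux/compiler.h>
#include <linux/spinlock.h>

static bool run_to_completion;  /* set once, at panic time */
static DEFINE_SPINLOCK(xmit_lock);

static void send_one(void)
{
        /* One snapshot governs both branches below. */
        bool rtc = READ_ONCE(run_to_completion);
        unsigned long flags = 0;

        if (!rtc)
                spin_lock_irqsave(&xmit_lock, flags);
        /* ... queue or emit the message ... */
        if (!rtc)
                spin_unlock_irqrestore(&xmit_lock, flags);
}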
@@ -1943,14 +1933,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
+ int newst;
+
+ if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
+ newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
+ else
+ newst = IPMI_MAINTENANCE_MODE_STATE_RESET;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- intf->auto_maintenance_timeout
- = maintenance_mode_timeout_ms;
+ intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = true;
+ && intf->maintenance_mode_state < newst) {
+ intf->maintenance_mode_state = newst;
maintenance_mode_update(intf);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
@@ -1964,7 +1960,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
@@ -2045,12 +2041,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
@@ -2108,7 +2101,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2161,7 +2154,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 4;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
return 0;
}
@@ -2224,12 +2217,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* Create a sequence number with a 1 second
@@ -2278,7 +2268,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2306,24 +2296,21 @@ static int i_ipmi_request(struct ipmi_user *user,
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
- if (user) {
- if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
- /* Decrement will happen at the end of the routine. */
- rv = -EBUSY;
- goto out;
- }
- }
-
- if (supplied_recv)
+ if (supplied_recv) {
recv_msg = supplied_recv;
- else {
- recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
- rv = -ENOMEM;
- goto out;
+ recv_msg->user = user;
+ if (user) {
+ atomic_inc(&user->nr_msgs);
+ /* The put happens when the message is freed. */
+ kref_get(&user->refcount);
}
+ } else {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg))
+ return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
@@ -2334,21 +2321,22 @@ static int i_ipmi_request(struct ipmi_user *user,
if (smi_msg == NULL) {
if (!supplied_recv)
ipmi_free_recv_msg(recv_msg);
- rv = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
}
- rcu_read_lock();
+ if (!run_to_completion)
+ mutex_lock(&intf->users_mutex);
+ if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
+ /* No messages while the BMC is in reset. */
+ rv = -EBUSY;
+ goto out_err;
+ }
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
- recv_msg->user = user;
- if (user)
- /* The put happens when the message is freed. */
- kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
@@ -2377,19 +2365,19 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) {
out_err:
- ipmi_free_smi_msg(smi_msg);
- ipmi_free_recv_msg(recv_msg);
+ if (!supplied_smi)
+ ipmi_free_smi_msg(smi_msg);
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
} else {
dev_dbg(intf->si_dev, "Send: %*ph\n",
smi_msg->data_size, smi_msg->data);
smi_send(intf, intf->handlers, smi_msg, priority);
}
- rcu_read_unlock();
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
-out:
- if (rv && user)
- atomic_dec(&user->nr_msgs);
return rv;
}
@@ -2416,12 +2404,12 @@ int ipmi_request_settime(struct ipmi_user *user,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2440,7 +2428,7 @@ int ipmi_request_settime(struct ipmi_user *user,
retries,
retry_time_ms);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
@@ -2455,12 +2443,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
int priority)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2479,7 +2467,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
lun,
-1, 0);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
@@ -2640,6 +2628,12 @@ retry_bmc_lock:
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
+ /* Don't allow sysfs access when in maintenance mode. */
+ if (intf->maintenance_mode_state) {
+ rv = -EBUSY;
+ goto out_noprocessing;
+ }
+
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
@@ -2675,7 +2669,7 @@ retry_bmc_lock:
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
need_waiter(intf); /* Retry later on an error. */
else
- __scan_channels(intf, &id);
+ __scan_channels(intf, &id, false);
if (!intf_set) {
@@ -2695,7 +2689,7 @@ retry_bmc_lock:
goto out_noprocessing;
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
/* Version info changes, scan the channels again. */
- __scan_channels(intf, &bmc->fetch_id);
+ __scan_channels(intf, &bmc->fetch_id, true);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
@@ -3066,7 +3060,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- queue_work(remove_work_wq, &bmc->remove_work);
+ queue_work(bmc_remove_work_wq, &bmc->remove_work);
}
/*
@@ -3424,8 +3418,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
intf->channels_ready = true;
wake_up(&intf->waitq);
} else {
- intf->channel_list = intf->wchannels + set;
- intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
}
@@ -3447,10 +3439,21 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
/*
* Must be holding intf->bmc_reg_mutex to call this.
*/
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool rescan)
{
int rv;
+ if (rescan) {
+ /* Clear channels_ready to force channels rescan. */
+ intf->channels_ready = false;
+ }
+
+ /* Skip channel scan if channels are already marked ready */
+ if (intf->channels_ready)
+ return 0;
+
if (ipmi_version_major(id) > 1
|| (ipmi_version_major(id) == 1
&& ipmi_version_minor(id) >= 5)) {
@@ -3522,20 +3525,32 @@ static ssize_t nr_msgs_show(struct device *dev,
char *buf)
{
struct ipmi_smi *intf = container_of(attr,
- struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_smi, nr_msgs_devattr);
struct ipmi_user *user;
- int index;
unsigned int count = 0;
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link)
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link)
count += atomic_read(&user->nr_msgs);
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
return sysfs_emit(buf, "%u\n", count);
}
static DEVICE_ATTR_RO(nr_msgs);
+static ssize_t maintenance_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi,
+ maintenance_mode_devattr);
+
+ return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
+ intf->auto_maintenance_timeout);
+}
+static DEVICE_ATTR_RO(maintenance_mode);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3571,12 +3586,6 @@ int ipmi_add_smi(struct module *owner,
if (!intf)
return -ENOMEM;
- rv = init_srcu_struct(&intf->users_srcu);
- if (rv) {
- kfree(intf);
- return rv;
- }
-
intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
@@ -3593,11 +3602,14 @@ int ipmi_add_smi(struct module *owner,
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->user_msgs);
+ mutex_init(&intf->user_msgs_mutex);
INIT_LIST_HEAD(&intf->users);
+ mutex_init(&intf->users_mutex);
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
- spin_lock_init(&intf->seq_lock);
+ mutex_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
@@ -3605,13 +3617,12 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- tasklet_setup(&intf->recv_tasklet,
- smi_recv_tasklet);
+ INIT_WORK(&intf->smi_work, smi_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
- spin_lock_init(&intf->events_lock);
+ mutex_init(&intf->events_mutex);
spin_lock_init(&intf->watch_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -3624,12 +3635,16 @@ int ipmi_add_smi(struct module *owner,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+ /*
+ * Grab the watchers mutex so we can deliver the new interface
+ * without races.
+ */
+ mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
- ipmi_interfaces_mutex_held()) {
+ list_for_each_entry(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
@@ -3638,9 +3653,9 @@ int ipmi_add_smi(struct module *owner,
}
/* Add the new interface in numeric order. */
if (i == 0)
- list_add_rcu(&intf->link, &ipmi_interfaces);
+ list_add(&intf->link, &ipmi_interfaces);
else
- list_add_tail_rcu(&intf->link, link);
+ list_add_tail(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -3653,7 +3668,7 @@ int ipmi_add_smi(struct module *owner,
}
mutex_lock(&intf->bmc_reg_mutex);
- rv = __scan_channels(intf, &id);
+ rv = __scan_channels(intf, &id, false);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
@@ -3672,18 +3687,22 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
+ intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
+ sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
out_err_bmc_reg:
@@ -3692,10 +3711,9 @@ int ipmi_add_smi(struct module *owner,
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
@@ -3761,20 +3779,30 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num, index;
+ int intf_num;
if (!intf)
return;
+
intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
+ cancel_work_sync(&intf->smi_work);
+ /* smi_work() can no longer be in progress after this. */
+
intf->intf_num = -1;
intf->in_shutdown = true;
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- /* At this point no users can be added to the interface. */
+ /*
+ * At this point no users can be added to the interface and no
+ * new messages can be sent.
+ */
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+ device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3787,24 +3815,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
- index = srcu_read_lock(&intf->users_srcu);
+ mutex_lock(&intf->users_mutex);
while (!list_empty(&intf->users)) {
- struct ipmi_user *user =
- container_of(list_next_rcu(&intf->users),
- struct ipmi_user, link);
+ struct ipmi_user *user = list_first_entry(&intf->users,
+ struct ipmi_user, link);
_ipmi_destroy_user(user);
}
- srcu_read_unlock(&intf->users_srcu, index);
-
- if (intf->handlers->shutdown)
- intf->handlers->shutdown(intf->send_info);
+ mutex_unlock(&intf->users_mutex);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
- cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
@@ -3882,7 +3905,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3903,9 +3926,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -3929,58 +3951,47 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
msg->data_size, msg->data);
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /* Extract the source address from the data. */
- ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
- ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
- ipmb_addr->slave_addr = msg->rsp[6];
- ipmb_addr->lun = msg->rsp[7] & 3;
- ipmb_addr->channel = msg->rsp[3] & 0xf;
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[7] >> 2;
- recv_msg->msg.netfn = msg->rsp[4] >> 2;
- recv_msg->msg.cmd = msg->rsp[8];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 10, not 9 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 10;
- memcpy(recv_msg->msg_data, &msg->rsp[9],
- msg->rsp_size - 10);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -3993,7 +4004,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
int rv = 0;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_direct_addr *daddr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
unsigned char netfn = msg->rsp[0] >> 2;
unsigned char cmd = msg->rsp[3];
@@ -4002,9 +4013,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4020,55 +4030,44 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /* Extract the source address from the data. */
- daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
- daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
- daddr->channel = 0;
- daddr->slave_addr = msg->rsp[1];
- daddr->rs_lun = msg->rsp[0] & 3;
- daddr->rq_lun = msg->rsp[2] & 3;
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = (msg->rsp[2] >> 2);
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[3];
- recv_msg->msg.data = recv_msg->msg_data;
-
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, msg->rsp + 4,
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4080,7 +4079,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4182,7 +4181,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
@@ -4203,63 +4202,76 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
- /* We didn't find a user, just give up. */
+ /* We didn't find a user, just give up and return an error. */
ipmi_inc_stat(intf, unhandled_commands);
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = chan;
+ msg->data[3] = msg->rsp[4]; /* handle */
+ msg->data[4] = msg->rsp[8]; /* rsSWID */
+ msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
+ msg->data[6] = ipmb_checksum(&msg->data[3], 3);
+ msg->data[7] = msg->rsp[5]; /* rqSWID */
+ /* rqseq/lun */
+ msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
+ msg->data[9] = cmd;
+ msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[11] = ipmb_checksum(&msg->data[7], 4);
+ msg->data_size = 12;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ smi_send(intf, intf->handlers, msg, 0);
/*
- * Don't do anything with these messages, just allow
- * them to be freed.
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
*/
- rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /* Extract the source address from the data. */
- lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
- lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
- lan_addr->session_handle = msg->rsp[4];
- lan_addr->remote_SWID = msg->rsp[8];
- lan_addr->local_SWID = msg->rsp[5];
- lan_addr->lun = msg->rsp[9] & 3;
- lan_addr->channel = msg->rsp[3] & 0xf;
- lan_addr->privilege = msg->rsp[3] >> 4;
+ rv = -1;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[9] >> 2;
- recv_msg->msg.netfn = msg->rsp[6] >> 2;
- recv_msg->msg.cmd = msg->rsp[10];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 12, not 11 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 12;
- memcpy(recv_msg->msg_data, &msg->rsp[11],
- msg->rsp_size - 12);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4281,7 +4293,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
/*
* We expect the OEM SW to perform error checking
@@ -4310,9 +4322,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4325,48 +4336,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
*/
rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_user);
- } else {
- /*
- * OEM Messages are expected to be delivered via
- * the system interface to SMS software. We might
- * need to visit this again depending on OEM
- * requirements
- */
- smi_addr = ((struct ipmi_system_interface_addr *)
- &recv_msg->addr);
- smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr->channel = IPMI_BMC_CHANNEL;
- smi_addr->lun = msg->rsp[0] & 3;
-
- recv_msg->user = user;
- recv_msg->user_msg_data = NULL;
- recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[1];
- recv_msg->msg.data = recv_msg->msg_data;
+ } else if (!IS_ERR(recv_msg)) {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements.
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * The message starts at byte 4 which follows the
- * Channel Byte in the "GET MESSAGE" command
- */
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, &msg->rsp[4],
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4396,8 +4401,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
- int rv = 0, deliver_count = 0, index;
- unsigned long flags;
+ int rv = 0, deliver_count = 0;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
@@ -4412,7 +4416,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
ipmi_inc_stat(intf, events);
@@ -4420,18 +4424,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* Allocate and fill in one message for every user that is
* getting events.
*/
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
if (!user->gets_events)
continue;
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- rcu_read_unlock();
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg)) {
+ mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
+ user = recv_msg->user;
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
+ kref_put(&user->refcount, free_ipmi_user);
}
/*
* We couldn't allocate memory for the
@@ -4445,11 +4451,9 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
- recv_msg->user = user;
- kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
if (deliver_count) {
/* Now deliver all the messages. */
@@ -4462,8 +4466,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* No one to receive the message, put it in queue if there's
* not already too many things in the queue.
*/
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(NULL);
+ if (IS_ERR(recv_msg)) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
@@ -4487,7 +4491,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
+ mutex_unlock(&intf->events_mutex);
return rv;
}
@@ -4498,7 +4502,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4539,9 +4543,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1,
+ msg->data[1], msg->rsp_size);
return_unspecified:
/* Generate an error response for the message. */
@@ -4571,14 +4576,14 @@ return_unspecified:
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL)) {
+ && (msg->recv_msg == NULL)) {
- if (intf->in_shutdown)
+ if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
* This is the local response to a command send, start
- * the timer for these. The user_data will not be
+ * the timer for these. The recv_msg will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
@@ -4617,10 +4622,10 @@ return_unspecified:
* The NetFN and Command in the response is not even
* marginally correct.
*/
- dev_warn(intf->si_dev,
- "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
goto return_unspecified;
}
@@ -4638,13 +4643,16 @@ return_unspecified:
requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ && (msg->recv_msg != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
*/
struct ipmi_recv_msg *recv_msg;
+ if (intf->run_to_completion)
+ goto out;
+
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
@@ -4652,7 +4660,7 @@ return_unspecified:
cc = msg->rsp[2];
process_response_response:
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
requeue = 0;
if (!recv_msg)
@@ -4667,6 +4675,9 @@ process_response_response:
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
+ if (intf->run_to_completion)
+ goto out;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -4741,6 +4752,9 @@ process_response_response:
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
+ if (intf->run_to_completion)
+ goto out;
+
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -4756,10 +4770,10 @@ process_response_response:
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
- struct ipmi_smi_msg *smi_msg;
- unsigned long flags = 0;
- int rv;
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
@@ -4779,7 +4793,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
* To preserve message order, quit if we
* can't handle a message. Add the message
* back at the head, this is safe because this
- * tasklet is the only thing that pulls the
+ * workqueue is the only thing that pulls the
* messages.
*/
list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4793,31 +4807,16 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
-
- /*
- * If the pretimout count is non-zero, decrement one from it and
- * deliver pretimeouts to all the users.
- */
- if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
- struct ipmi_user *user;
- int index;
-
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (user->handler->ipmi_watchdog_pretimeout)
- user->handler->ipmi_watchdog_pretimeout(
- user->handler_data);
- }
- srcu_read_unlock(&intf->users_srcu, index);
- }
}
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi *intf = from_work(intf, t, smi_work);
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
+ struct ipmi_recv_msg *msg, *msg2;
+ int cc;
/*
* Start the next message if available.
@@ -4826,9 +4825,7 @@ static void smi_recv_tasklet(struct tasklet_struct *t)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
-
- rcu_read_lock();
-
+restart:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4846,15 +4843,64 @@ static void smi_recv_tasklet(struct tasklet_struct *t)
intf->curr_msg = newmsg;
}
}
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- if (newmsg)
- intf->handlers->sender(intf->send_info, newmsg);
- rcu_read_unlock();
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+ if (cc) {
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+ else
+ ipmi_free_smi_msg(newmsg);
+ goto restart;
+ }
+ }
handle_new_recv_msgs(intf);
+
+ /* Nothing below applies during panic time. */
+ if (run_to_completion)
+ return;
+
+ /*
+ * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ mutex_unlock(&intf->users_mutex);
+ }
+
+ /*
+ * Freeing the message can cause a user to be released, which
+ * can then cause the interface to be freed. Make sure that
+ * doesn't happen until we are ready.
+ */
+ kref_get(&intf->refcount);
+
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ struct ipmi_user *user = msg->user;
+
+ list_del(&msg->link);
+
+ if (refcount_read(&user->destroyed) == 0)
+ ipmi_free_recv_msg(msg);
+ else
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ kref_put(&intf->refcount, intf_free);
}
/* Handle a new message from the lower layer. */
@@ -4862,11 +4908,11 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/*
* To preserve message order, we keep a queue and deliver from
- * a tasklet.
+ * a workqueue.
*/
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4933,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_tasklet(&intf->recv_tasklet);
+ smi_work(&intf->smi_work);
else
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4899,7 +4945,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -4928,8 +4974,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
- int slot, unsigned long *flags,
- bool *need_timer)
+ int slot, bool *need_timer)
{
struct ipmi_recv_msg *msg;
@@ -4981,7 +5026,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
return;
}
- spin_unlock_irqrestore(&intf->seq_lock, *flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Send the new message. We send with a zero
@@ -5002,7 +5047,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
} else
ipmi_free_smi_msg(smi_msg);
- spin_lock_irqsave(&intf->seq_lock, *flags);
+ mutex_lock(&intf->seq_lock);
}
}
@@ -5029,7 +5074,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
* list.
*/
INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
@@ -5039,8 +5084,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
- &flags, &need_timer);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ &need_timer);
+ mutex_unlock(&intf->seq_lock);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
@@ -5060,7 +5105,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
+ intf->auto_maintenance_timeout = 0;
maintenance_mode_update(intf);
}
}
@@ -5068,7 +5115,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_wq, &intf->smi_work);
return need_timer;
}
@@ -5076,28 +5123,28 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
+ if (intf->maintenance_mode_state)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
-static struct timer_list ipmi_timer;
-
static atomic_t stop_operation;
-static void ipmi_timeout(struct timer_list *unused)
+static void ipmi_timeout_work(struct work_struct *work)
{
+ if (atomic_read(&stop_operation))
+ return;
+
struct ipmi_smi *intf;
bool need_timer = false;
- int index;
- if (atomic_read(&stop_operation))
- return;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
@@ -5106,15 +5153,27 @@ static void ipmi_timeout(struct timer_list *unused)
}
need_timer = true;
}
+ if (intf->maintenance_mode_state)
+ need_timer = true;
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
if (need_timer)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ queue_work(system_wq, &ipmi_timer_work);
+}
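This split is the standard way to move long-running periodic work out of timer (softirq) context: the timer handler only queues a work item, the work item does the heavy lifting under mutexes and rearms the timer, and both paths check a stop flag so teardown can quiesce them. A condensed sketch of the same shape, including the matching shutdown order (all names hypothetical):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static atomic_t stopping;
static struct timer_list my_timer;

static void my_timeout_work(struct work_struct *work)
{
	if (atomic_read(&stopping))
		return;

	/* Heavy periodic maintenance; may take mutexes now. */

	mod_timer(&my_timer, jiffies + HZ);	/* rearm if still needed */
}
static DECLARE_WORK(my_work, my_timeout_work);

static void my_timeout(struct timer_list *unused)
{
	if (atomic_read(&stopping))
		return;
	queue_work(system_wq, &my_work);
}

/* Teardown: set the flag first, then kill the timer, then flush the work. */
static void my_stop(void)
{
	atomic_set(&stopping, 1);
	timer_delete_sync(&my_timer);
	cancel_work_sync(&my_work);
}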
+
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
@@ -5139,7 +5198,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
- rv->user_data = NULL;
+ rv->recv_msg = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
@@ -5155,27 +5214,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
struct ipmi_recv_msg *rv;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv) {
- rv->user = NULL;
- rv->done = free_recv_msg;
- atomic_inc(&recv_msg_inuse_count);
+ if (!rv) {
+ if (user)
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-ENOMEM);
}
+
+ rv->user = user;
+ rv->done = free_recv_msg;
+ if (user)
+ kref_get(&user->refcount);
+ atomic_inc(&recv_msg_inuse_count);
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user && !oops_in_progress)
- kref_put(&msg->user->refcount, free_user);
+ if (msg->user && !oops_in_progress) {
+ atomic_dec(&msg->user->nr_msgs);
+ kref_put(&msg->user->refcount, free_ipmi_user);
+ }
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user)
+{
+ WARN_ON_ONCE(msg->user); /* User should not be set. */
+ msg->user = user;
+ atomic_inc(&user->nr_msgs);
+ kref_get(&user->refcount);
+}
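ipmi_alloc_recv_msg(), ipmi_set_recv_msg_user() and ipmi_free_recv_msg() now maintain a single invariant: a message that points at a user holds both a kref on that user and one unit of its nr_msgs quota, taken together and released together. A reduced sketch of that pairing under hypothetical names:

#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_user {
	struct kref refcount;
	atomic_t nr_msgs;	/* quota: in-flight messages owned by this user */
};

struct my_msg {
	struct my_user *user;	/* counted reference, may be NULL */
};

/* Take both counts together when a message is bound to a user... */
static void msg_attach_user(struct my_msg *m, struct my_user *u)
{
	WARN_ON_ONCE(m->user);
	m->user = u;
	atomic_inc(&u->nr_msgs);
	kref_get(&u->refcount);
}

static void user_release(struct kref *k)
{
	kfree(container_of(k, struct my_user, refcount));
}

/* ...and drop both together when the message dies. */
static void msg_free(struct my_msg *m)
{
	if (m->user) {
		atomic_dec(&m->user->nr_msgs);
		kref_put(&m->user->refcount, user_release);
	}
	kfree(m);
}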
+
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
@@ -5191,9 +5274,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
/*
* Inside a panic, send a message and wait for a response.
*/
-static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
- struct ipmi_addr *addr,
- struct kernel_ipmi_msg *msg)
+static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
@@ -5223,6 +5306,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
ipmi_poll(intf);
}
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ user->intf->run_to_completion = 1;
+ _ipmi_panic_request_and_wait(user->intf, addr, msg);
+}
+EXPORT_SYMBOL(ipmi_panic_request_and_wait);
+
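The exported wrapper lets panic-time users (the watchdog, converted later in this series) fire a request without supplying preallocated messages: it flips the interface to run-to-completion and polls until the response lands. A sketch of a caller; the command byte is illustrative, and only ipmi_panic_request_and_wait() itself comes from the patch:

#include <linux/ipmi.h>
#include <linux/ipmi_msgdefs.h>

static void my_panic_ping(struct ipmi_user *user)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel = IPMI_BMC_CHANNEL,
	};
	struct kernel_ipmi_msg msg = {
		.netfn = IPMI_NETFN_APP_REQUEST,
		.cmd = 0x22,	/* illustrative: watchdog reset timer */
	};

	/* Blocks, polling the interface, until the response arrives. */
	ipmi_panic_request_and_wait(user, (struct ipmi_addr *)&addr, &msg);
}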
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
@@ -5291,7 +5383,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
}
/* Send the event announcing the panic. */
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM event holding the
@@ -5327,7 +5419,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -5336,7 +5428,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -5388,7 +5480,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
@@ -5406,7 +5498,7 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
@@ -5436,7 +5528,7 @@ static int panic_event(struct notifier_block *this,
intf->handlers->set_run_to_completion(intf->send_info,
1);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ list_for_each_entry(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
@@ -5481,15 +5573,11 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- rv = init_srcu_struct(&ipmi_interfaces_srcu);
- if (rv)
- goto out;
-
- remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
- if (!remove_work_wq) {
+ bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!bmc_remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out_wq;
+ goto out;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
@@ -5499,9 +5587,6 @@ static int ipmi_init_msghandler(void)
initialized = true;
-out_wq:
- if (rv)
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5525,7 +5610,7 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
- destroy_workqueue(remove_work_wq);
+ destroy_workqueue(bmc_remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@@ -5541,7 +5626,8 @@ static void __exit cleanup_ipmi(void)
* here.
*/
atomic_set(&stop_operation, 1);
- del_timer_sync(&ipmi_timer);
+ timer_delete_sync(&ipmi_timer);
+ cancel_work_sync(&ipmi_timer_work);
initialized = false;
@@ -5552,8 +5638,6 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
-
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index da22a8cbe68e..52a1130defe5 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi,
ipmi_smi_msg_received(smi->intf, msg);
}
-static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_smi_powernv *smi = send_info;
struct opal_ipmi_msg *opal_msg;
@@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
pr_devel("%s: -> %d\n", __func__, rc);
-
- if (!rc) {
- smi->cur_msg = msg;
- spin_unlock_irqrestore(&smi->msg_lock, flags);
- return;
+ if (rc) {
+ comp = IPMI_ERR_UNSPECIFIED;
+ goto err_unlock;
}
- comp = IPMI_ERR_UNSPECIFIED;
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return IPMI_CC_NO_ERROR;
+
err_unlock:
spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
- send_error_reply(smi, msg, comp);
+ return comp;
}
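This is one half of a cross-driver interface change: ->sender() now returns an IPMI completion code and the message handler synthesizes the error response, instead of each driver calling its own send_error_reply(). The new contract reduced to a stub; the hardware hooks are hypothetical:

#include <linux/ipmi_msgdefs.h>
#include <linux/ipmi_smi.h>

static int my_send(void *send_info, struct ipmi_smi_msg *msg)
{
	struct my_hw *hw = send_info;		/* hypothetical */

	if (!my_hw_ready(hw))			/* hypothetical probe */
		return IPMI_BUS_ERR;	/* upper layer builds the error reply */

	my_hw_enqueue(hw, msg);			/* hypothetical enqueue */
	return IPMI_CC_NO_ERROR;
}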
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
@@ -281,15 +282,13 @@ err_free:
return rc;
}
-static int ipmi_powernv_remove(struct platform_device *pdev)
+static void ipmi_powernv_remove(struct platform_device *pdev)
{
struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
ipmi_unregister_smi(smi->intf);
free_irq(smi->irq, smi);
irq_dispose_mapping(smi->irq);
-
- return 0;
}
static const struct of_device_id ipmi_powernv_match[] = {
@@ -304,7 +303,7 @@ static struct platform_driver powernv_ipmi_driver = {
.of_match_table = ipmi_powernv_match,
},
.probe = ipmi_powernv_probe,
- .remove = ipmi_powernv_remove,
+ .remove = ipmi_powernv_remove,
};
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 941d2dcc8c9d..e63c316d8aaa 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = {
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
-static struct ctl_table ipmi_table[] = {
+static const struct ctl_table ipmi_table[] = {
{ .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
@@ -699,8 +699,6 @@ static int __init ipmi_poweroff_init(void)
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
- int rv;
-
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
@@ -708,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void)
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
- rv = ipmi_destroy_user(ipmi_user);
- if (rv)
- pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
}
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..687835b53da5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -26,6 +26,14 @@ enum si_type {
/* Array is defined in the ipmi_si_intf.c */
extern const char *const si_to_str[];
+struct ipmi_match_info {
+ enum si_type type;
+};
+
+extern const struct ipmi_match_info ipmi_kcs_si_info;
+extern const struct ipmi_match_info ipmi_smic_si_info;
+extern const struct ipmi_match_info ipmi_bt_si_info;
+
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
@@ -64,7 +72,7 @@ struct si_sm_io {
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
- enum si_type si_type;
+ const struct ipmi_match_info *si_info;
struct device *dev;
};
@@ -93,6 +101,13 @@ void ipmi_si_pci_shutdown(void);
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
+#ifdef CONFIG_IPMI_LS2K
+void ipmi_si_ls2k_init(void);
+void ipmi_si_ls2k_shutdown(void);
+#else
+static inline void ipmi_si_ls2k_init(void) { }
+static inline void ipmi_si_ls2k_shutdown(void) { }
+#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5cd031f3fc97..5459ffdde8dc 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -53,6 +53,7 @@
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
+#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */
enum si_intf_state {
SI_NORMAL,
@@ -61,7 +62,8 @@ enum si_intf_state {
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
- SI_SETTING_ENABLES
+ SI_SETTING_ENABLES,
+ SI_HOSED
/* FIXME - add watchdog stuff. */
};
@@ -73,6 +75,10 @@ enum si_intf_state {
/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
+const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };
+
static bool initialized;
/*
@@ -269,8 +275,7 @@ void debug_timestamp(struct smi_info *smi_info, char *msg)
struct timespec64 t;
ktime_get_ts64(&t);
- dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
- msg, t.tv_sec, t.tv_nsec);
+ dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t);
}
#else
#define debug_timestamp(smi_info, x)
@@ -309,7 +314,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
- int rv;
+ int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
@@ -386,6 +391,17 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_get_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+}
+
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -692,7 +708,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -738,6 +754,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
break;
}
+ case SI_HOSED: /* Shouldn't happen. */
+ break;
}
}
@@ -752,6 +770,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
enum si_sm_result si_sm_result;
restart:
+ if (smi_info->si_state == SI_HOSED)
+ /* Just in case, hosed state is only left from the timeout. */
+ return SI_SM_HOSED;
+
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -775,18 +797,20 @@ restart:
/*
* Do the before return_hosed_msg, because that
- * releases the lock.
+ * releases the lock. We just disable operations for
+ * a while and retry in hosed state.
*/
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_HOSED;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
- return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ return_hosed_msg(smi_info, IPMI_BUS_ERR);
}
- goto restart;
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+ goto out;
}
/*
@@ -794,8 +818,6 @@ restart:
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
- unsigned char msg[2];
-
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
@@ -813,11 +835,7 @@ restart:
* interrupts work with the SMI, that's not really
* possible.
*/
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
-
- start_new_msg(smi_info, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ start_get_flags(smi_info);
goto restart;
}
}
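Taken together, the SI_HOSED additions implement a classic fault-parking loop: on SI_SM_HOSED the state machine stops retrying hot, failed user messages complete with IPMI_BUS_ERR, and a one-second timer later issues a cheap Get Message Flags probe to see whether the BMC recovered. The control flow, reduced to a sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

enum my_state { MY_NORMAL, MY_HOSED };

struct my_sm {
	enum my_state state;
	struct timer_list timer;
};

/* Fatal bus error: park instead of spinning on a dead BMC. */
static void my_sm_park(struct my_sm *sm)
{
	sm->state = MY_HOSED;
	mod_timer(&sm->timer, jiffies + HZ);	/* probe again in 1s */
}

/* Timer: send a cheap probe; MY_HOSED is only left if it succeeds. */
static void my_sm_timer(struct timer_list *t)
{
	struct my_sm *sm = timer_container_of(sm, t, timer);

	if (sm->state == MY_HOSED) {
		/* start_probe(sm); -- e.g. Get Message Flags */
	}
}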
@@ -859,7 +877,7 @@ restart:
if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
/* OK if it fails, the timer will just go off. */
- if (del_timer(&smi_info->si_timer))
+ if (timer_delete(&smi_info->si_timer))
smi_info->timer_running = false;
}
@@ -890,27 +908,29 @@ static void flush_messages(void *send_info)
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
+ while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp(smi_info, "Enqueue");
+ if (smi_info->si_state == SI_HOSED)
+ return IPMI_BUS_ERR;
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
- return;
+ return IPMI_CC_NO_ERROR;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
@@ -925,6 +945,7 @@ static void sender(void *send_info,
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ return IPMI_CC_NO_ERROR;
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
@@ -1072,7 +1093,8 @@ static void set_need_watch(void *send_info, unsigned int watch_mask)
static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
+ struct smi_info *smi_info = timer_container_of(smi_info, t,
+ si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
@@ -1082,6 +1104,10 @@ static void smi_timeout(struct timer_list *t)
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp(smi_info, "Timer");
+ if (smi_info->si_state == SI_HOSED)
+ /* Try something to see if the BMC is now operational. */
+ start_get_flags(smi_info);
+
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
@@ -1091,14 +1117,11 @@ static void smi_timeout(struct timer_list *t)
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_mod_timer;
- }
-
- /*
- * If the state machine asks for a short delay, then shorten
- * the timer timeout.
- */
- if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
@@ -1106,7 +1129,6 @@ static void smi_timeout(struct timer_list *t)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
-do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -1119,7 +1141,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
struct smi_info *smi_info = data;
unsigned long flags;
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
@@ -1164,7 +1186,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
enable = 1;
if (enable) {
@@ -1235,7 +1257,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1243,7 +1265,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io)
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
@@ -1614,7 +1636,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);
@@ -1649,7 +1671,7 @@ static ssize_t params_show(struct device *dev,
return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi_info->io.si_type],
+ si_to_str[smi_info->io.si_info->type],
addr_space_to_str[smi_info->io.addr_space],
smi_info->io.addr_data,
smi_info->io.regspacing,
@@ -1803,7 +1825,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->io.si_type == SI_BT)
+ smi_info->io.si_info->type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -1839,7 +1861,7 @@ static inline void stop_timer_and_thread(struct smi_info *smi_info)
}
smi_info->timer_can_start = false;
- del_timer_sync(&smi_info->si_timer);
+ timer_delete_sync(&smi_info->si_timer);
}
static struct smi_info *find_dup_si(struct smi_info *info)
@@ -1882,7 +1904,8 @@ int ipmi_si_add_smi(struct si_sm_io *io)
}
if (!io->io_setup) {
- if (io->addr_space == IPMI_IO_ADDR_SPACE) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ io->addr_space == IPMI_IO_ADDR_SPACE) {
io->io_setup = ipmi_si_port_setup;
} else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
io->io_setup = ipmi_si_mem_setup;
@@ -1906,13 +1929,13 @@ int ipmi_si_add_smi(struct si_sm_io *io)
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
@@ -1921,7 +1944,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
list_add_tail(&new_smi->link, &smi_infos);
@@ -1944,12 +1967,12 @@ static int try_smi_init(struct smi_info *new_smi)
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type],
+ si_to_str[new_smi->io.si_info->type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->io.si_type) {
+ switch (new_smi->io.si_info->type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -2072,7 +2095,7 @@ static int try_smi_init(struct smi_info *new_smi)
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
@@ -2090,10 +2113,18 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
+/*
+ * Devices in the same address space at the same address are the same.
+ */
+static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
+{
+ return (e1->io.addr_space == e2->io.addr_space &&
+ e1->io.addr_data == e2->io.addr_data);
+}
+
static int __init init_ipmi_si(void)
{
- struct smi_info *e;
- enum ipmi_addr_src type = SI_INVALID;
+ struct smi_info *e, *e2;
if (initialized)
return 0;
@@ -2106,45 +2137,77 @@ static int __init init_ipmi_si(void)
ipmi_si_pci_init();
+ ipmi_si_ls2k_init();
+
ipmi_si_parisc_init();
- /* We prefer devices with interrupts, but in the case of a machine
- with multiple BMCs we assume that there will be several instances
- of a given type so if we succeed in registering a type then also
- try to register everything else of the same type */
mutex_lock(&smi_infos_lock);
+
+ /*
+ * Scan through all the devices. We prefer devices with
+ * interrupts, so go through those first in case there are any
+ * duplicates that don't have the interrupt set.
+ */
list_for_each_entry(e, &smi_infos, link) {
- /* Try to register a device if it has an IRQ and we either
- haven't successfully registered a device yet or this
- device has the same type as one we successfully registered */
- if (e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ bool dup = false;
+
+ /* Register ones with interrupts first. */
+ if (!e->io.irq)
+ continue;
+
+ /*
+ * Go through the ones we have already seen to see if this
+ * is a dup.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
- /* type will only have been set if we successfully registered an si */
- if (type)
- goto skip_fallback_noirq;
+ /*
+ * Now try devices without interrupts.
+ */
+ list_for_each_entry(e, &smi_infos, link) {
+ bool dup = false;
- /* Fall back to the preferred device */
+ if (e->io.irq)
+ continue;
- list_for_each_entry(e, &smi_infos, link) {
- if (!e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ /*
+ * Go through the ones we have already seen to see if
+ * this is a dup. We have already looked at the ones
+ * with interrupts.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (!e2->io.irq)
+ continue;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
+ }
+ }
+ if (!dup)
+ try_smi_init(e);
}
-skip_fallback_noirq:
initialized = true;
mutex_unlock(&smi_infos_lock);
- if (type)
- return 0;
-
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
@@ -2266,7 +2329,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_space != addr_space)
continue;
- if (e->io.si_type != si_type)
+ if (e->io.si_info->type != si_type)
continue;
if (e->io.addr_data == addr) {
dev = get_device(e->io.dev);
@@ -2287,6 +2350,8 @@ static void cleanup_ipmi_si(void)
ipmi_si_pci_shutdown();
+ ipmi_si_ls2k_shutdown();
+
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c
new file mode 100644
index 000000000000..45442c257efd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Loongson-2K BMC IPMI interface
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "ipmi_si.h"
+
+#define LS2K_KCS_FIFO_IBFH 0x0
+#define LS2K_KCS_FIFO_IBFT 0x1
+#define LS2K_KCS_FIFO_OBFH 0x2
+#define LS2K_KCS_FIFO_OBFT 0x3
+
+/* KCS registers */
+#define LS2K_KCS_REG_STS 0x4
+#define LS2K_KCS_REG_DATA_OUT 0x5
+#define LS2K_KCS_REG_DATA_IN 0x6
+#define LS2K_KCS_REG_CMD 0x8
+
+#define LS2K_KCS_CMD_DATA 0xa
+#define LS2K_KCS_VERSION 0xb
+#define LS2K_KCS_WR_REQ 0xc
+#define LS2K_KCS_WR_ACK 0x10
+
+#define LS2K_KCS_STS_OBF BIT(0)
+#define LS2K_KCS_STS_IBF BIT(1)
+#define LS2K_KCS_STS_SMS_ATN BIT(2)
+#define LS2K_KCS_STS_CMD BIT(3)
+
+#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD)
+
+static bool ls2k_registered;
+
+static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ int reg_offset;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_STS;
+ } else {
+ writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS);
+ reg_offset = LS2K_KCS_REG_DATA_OUT;
+ }
+
+ return readb(addr + reg_offset);
+}
+
+static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ unsigned char inb = 0, cmd;
+ bool obf, ibf;
+
+ obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT);
+ ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT);
+ cmd = readb(addr + LS2K_KCS_CMD_DATA);
+
+ if (offset & BIT(0)) {
+ inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK;
+ inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf)
+ | FIELD_PREP(LS2K_KCS_STS_IBF, ibf)
+ | FIELD_PREP(LS2K_KCS_STS_CMD, cmd);
+ } else {
+ inb = readb(addr + LS2K_KCS_REG_DATA_OUT);
+ writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT);
+ }
+
+ return inb;
+}
+
+static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char sts = readb(addr + LS2K_KCS_REG_STS);
+ int reg_offset;
+
+ if (sts & LS2K_KCS_STS_IBF)
+ return;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_CMD;
+ sts |= LS2K_KCS_STS_CMD;
+ } else {
+ reg_offset = LS2K_KCS_REG_DATA_IN;
+ sts &= ~LS2K_KCS_STS_CMD;
+ }
+
+ writew(val, addr + reg_offset);
+ writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char ibfh, ibft;
+ int reg_offset;
+
+ ibfh = readb(addr + LS2K_KCS_FIFO_IBFH);
+ ibft = readb(addr + LS2K_KCS_FIFO_IBFT);
+
+ if (ibfh ^ ibft)
+ return;
+
+ reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN;
+ writew(val, addr + reg_offset);
+
+ writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA);
+ writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr)
+ iounmap(io->addr);
+}
+
+static int ipmi_ls2k_mem_setup(struct si_sm_io *io)
+{
+ unsigned char version;
+
+ io->addr = ioremap(io->addr_data, io->regspacing);
+ if (!io->addr)
+ return -EIO;
+
+ version = readb(io->addr + LS2K_KCS_VERSION);
+
+ io->inputb = version ? ls2k_mem_inb_v1 : ls2k_mem_inb_v0;
+ io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0;
+ io->io_cleanup = ls2k_mem_cleanup;
+
+ return 0;
+}
+
+static int ipmi_ls2k_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_info = &ipmi_kcs_si_info;
+ io.io_setup = ipmi_ls2k_mem_setup;
+ io.addr_data = pdev->resource[0].start;
+ io.regspacing = resource_size(&pdev->resource[0]);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_ls2k_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_ls2k_platform_driver = {
+ .driver = {
+ .name = "ls2k-ipmi-si",
+ },
+ .probe = ipmi_ls2k_probe,
+ .remove = ipmi_ls2k_remove,
+};
+
+void ipmi_si_ls2k_init(void)
+{
+ platform_driver_register(&ipmi_ls2k_platform_driver);
+ ls2k_registered = true;
+}
+
+void ipmi_si_ls2k_shutdown(void)
+{
+ if (ls2k_registered)
+ platform_driver_unregister(&ipmi_ls2k_platform_driver);
+}
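The v1 register layout encodes a one-slot FIFO's occupancy in separate head and tail toggle bits: the slot is full while the bits differ, the producer marks it full by writing the inverse of the tail into the head, and the consumer marks it empty by copying the head into the tail. That is all the XORs in the v1 accessors are doing; the same logic in isolation (hypothetical helpers, not part of the driver):

#include <linux/types.h>

static bool slot_full(u8 head, u8 tail)
{
	return head ^ tail;	/* bits differ => occupied */
}

static u8 producer_mark_full(u8 tail)
{
	return !tail;		/* write this into the head register */
}

static u8 consumer_mark_empty(u8 head)
{
	return head;		/* write this into the tail register */
}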
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 2be2967f6b5f..3b0a70d9adbb 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
memset(&io, 0, sizeof(io));
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
io.addr_source = SI_DEVICETREE;
io.addr_space = IPMI_MEM_ADDR_SPACE;
io.addr_data = dev->hpa.start;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 74fa2055868b..17f72763322d 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci,
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
- if (io->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- io->regsize = DEFAULT_REGSIZE;
- io->regshift = 0;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- io->regspacing = regspacing;
- if (io->io_setup(io)) {
- dev_err(io->dev, "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- io->outputb(io, 1, 0x10);
- /* read status back */
- status = io->inputb(io, 1);
- io->io_cleanup(io);
- if (status)
- return regspacing;
- regspacing *= 4;
+ unsigned char status;
+ int regspacing;
+
+ if (io->si_info->type != SI_KCS)
+ return DEFAULT_REGSPACING;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
}
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
}
+
return DEFAULT_REGSPACING;
}
@@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
switch (pdev->class) {
case PCI_CLASS_SERIAL_IPMI_SMIC:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_KCS:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_BT:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
default:
@@ -97,6 +99,9 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
}
if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT))
+ return -ENXIO;
+
io.addr_space = IPMI_IO_ADDR_SPACE;
io.io_setup = ipmi_si_port_setup;
} else {
@@ -115,7 +120,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
if (io.irq)
io.irq_setup = ipmi_std_irq_setup;
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n",
&pdev->resource[0], io.regsize, io.regspacing, io.irq);
return ipmi_si_add_smi(&io);
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index cd2edd8f8a03..fb6e359ae494 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev)
switch (type) {
case SI_KCS:
+ io.si_info = &ipmi_kcs_si_info;
+ break;
case SI_SMIC:
+ io.si_info = &ipmi_smic_si_info;
+ break;
case SI_BT:
- io.si_type = type;
+ io.si_info = &ipmi_bt_si_info;
break;
case SI_TYPE_INVALID: /* User disabled this in hardcode. */
return -ENODEV;
@@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
+ { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info },
+ { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info },
+ { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info },
+ {}
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);
@@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
+ io.si_info = device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
@@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr)
{
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
- slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type,
io->addr_space,
io->addr_data);
#endif
@@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
switch (tmp) {
case 1:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case 2:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case 3:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
case 4: /* SSIF, just ignore */
return -ENODEV;
@@ -405,11 +406,9 @@ static int ipmi_probe(struct platform_device *pdev)
return platform_ipmi_probe(pdev);
}
-static int ipmi_remove(struct platform_device *pdev)
+static void ipmi_remove(struct platform_device *pdev)
{
ipmi_si_remove_by_dev(&pdev->dev);
-
- return 0;
}
static int pdev_match_name(struct device *dev, const void *data)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1f7600c361e6..ef1582a029f4 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data)
/* Wait for something to do */
result = wait_for_completion_interruptible(
&ssif_info->wake_thread);
- if (ssif_info->stopping)
- break;
if (result == -ERESTARTSYS)
continue;
init_completion(&ssif_info->wake_thread);
@@ -541,7 +539,8 @@ static void start_resend(struct ssif_info *ssif_info);
static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ struct ssif_info *ssif_info = timer_container_of(ssif_info, t,
+ retry_timer);
unsigned long oflags, *flags;
bool waiting, resend;
@@ -565,7 +564,8 @@ static void retry_timeout(struct timer_list *t)
static void watch_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
+ struct ssif_info *ssif_info = timer_container_of(ssif_info, t,
+ watch_timer);
unsigned long oflags, *flags;
if (ssif_info->stopping)
@@ -599,7 +599,7 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
if (ssif_info->waiting_alert) {
ssif_info->waiting_alert = false;
- del_timer(&ssif_info->retry_timer);
+ timer_delete(&ssif_info->retry_timer);
do_get = true;
} else if (ssif_info->curr_msg) {
ssif_info->got_alert = true;
@@ -980,7 +980,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
ipmi_ssif_unlock_cond(ssif_info, flags);
start_get(ssif_info);
} else {
- /* Wait a jiffie then request the next message */
+ /* Wait a jiffy then request the next message */
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
if (!ssif_info->stopping)
@@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
@@ -1084,11 +1083,10 @@ static void sender(void *send_info,
struct timespec64 t;
ktime_get_real_ts64(&t);
- dev_dbg(&ssif_info->client->dev,
- "**Enqueue %02x %02x: %lld.%6.6ld\n",
- msg->data[0], msg->data[1],
- (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
+ dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n",
+ msg->data[0], msg->data[1], &t);
}
+ return IPMI_CC_NO_ERROR;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
@@ -1268,12 +1266,10 @@ static void shutdown_ssif(void *send_info)
schedule_timeout(1);
ssif_info->stopping = true;
- del_timer_sync(&ssif_info->watch_timer);
- del_timer_sync(&ssif_info->retry_timer);
- if (ssif_info->thread) {
- complete(&ssif_info->wake_thread);
+ timer_delete_sync(&ssif_info->watch_timer);
+ timer_delete_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread)
kthread_stop(ssif_info->thread);
- }
}
static void ssif_remove(struct i2c_client *client)
@@ -1368,8 +1364,20 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
rv = do_cmd(client, 2, msg, &len, resp);
if (rv)
rv = -ENODEV;
- else
+ else {
+ if (len < 3) {
+ rv = -ENODEV;
+ } else {
+ struct ipmi_device_id id;
+
+ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+ resp + 2, len - 2, &id);
+ if (rv)
+ rv = -ENODEV; /* Error means a BMC probably isn't there. */
+ }
+ if (!rv && info)
strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ }
kfree(resp);
return rv;
}
@@ -1704,6 +1712,16 @@ static int ssif_probe(struct i2c_client *client)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
+ /*
+ * Send a get device id command and validate its response to
+ * make sure a valid BMC is there.
+ */
+ rv = ssif_detect(client, NULL);
+ if (rv) {
+ dev_err(&client->dev, "Not present\n");
+ goto out;
+ }
+
/* Now check for system interface capabilities */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -2049,7 +2067,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev)
#endif
static const struct i2c_device_id ssif_id[] = {
- { DEVICE_NAME, 0 },
+ { DEVICE_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, ssif_id);
@@ -2071,7 +2089,7 @@ static int ssif_platform_probe(struct platform_device *dev)
return dmi_ipmi_probe(dev);
}
-static int ssif_platform_remove(struct platform_device *dev)
+static void ssif_platform_remove(struct platform_device *dev)
{
struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev);
@@ -2079,13 +2097,13 @@ static int ssif_platform_remove(struct platform_device *dev)
list_del(&addr_info->link);
kfree(addr_info);
mutex_unlock(&ssif_infos_mutex);
- return 0;
}
static const struct platform_device_id ssif_plat_ids[] = {
{ "dmi-ipmi-ssif", 0 },
{ }
};
+MODULE_DEVICE_TABLE(platform, ssif_plat_ids);
static struct platform_driver ipmi_driver = {
.driver = {
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 9a459257489f..a013ddbf1466 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -150,7 +150,7 @@ static char preaction[16] = "pre_none";
static unsigned char preop_val = WDOG_PREOP_NONE;
static char preop[16] = "preop_none";
-static DEFINE_SPINLOCK(ipmi_read_lock);
+static DEFINE_MUTEX(ipmi_read_mutex);
static char data_to_read;
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static struct fasync_struct *fasync_q;
@@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
{
struct kernel_ipmi_msg msg;
unsigned char data[6];
- int rv;
+ int rv = 0;
struct ipmi_system_interface_addr addr;
int hbnow = 0;
@@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
msg.cmd = IPMI_WDOG_SET_TIMER;
msg.data = data;
msg.data_len = sizeof(data);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- smi_msg,
- recv_msg,
- 1);
+ if (smi_msg)
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ else
+ ipmi_panic_request_and_wait(watchdog_user,
+ (struct ipmi_addr *) &addr, &msg);
if (rv)
pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
@@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat)
atomic_set(&msg_tofree, 2);
- rv = __ipmi_set_timeout(&smi_msg,
- &recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now);
if (rv) {
atomic_set(&msg_tofree, 0);
return rv;
@@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat)
return rv;
}
-static atomic_t panic_done_count = ATOMIC_INIT(0);
-
-static void panic_smi_free(struct ipmi_smi_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-static void panic_recv_free(struct ipmi_recv_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
static void panic_halt_ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
- int rv;
/*
* Don't reset the timer if we have the timer turned off, that
@@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- &panic_halt_heartbeat_smi_msg,
- &panic_halt_heartbeat_recv_msg,
- 1);
- if (rv)
- atomic_sub(2, &panic_done_count);
+ ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr,
+ &msg);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
/*
* Special call, doesn't claim any locks. This is only to be called
* at panic or halt time, in run-to-completion mode, when the caller
@@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void)
int send_heartbeat_now;
int rv;
- /* Wait for the messages to be free. */
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
- rv = __ipmi_set_timeout(&panic_halt_smi_msg,
- &panic_halt_recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
}
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
}
static int __ipmi_heartbeat(void)
@@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file,
* Reading returns if the pretimeout has gone off, and it only does
* it once per pretimeout.
*/
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
@@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file,
add_wait_queue(&read_q, &wait);
while (!data_to_read && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
schedule();
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
}
remove_wait_queue(&read_q, &wait);
@@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file,
data_to_read = 0;
out:
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
if (rv == 0) {
if (copy_to_user(buf, &data_to_read, 1))
@@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait)
poll_wait(file, &read_q, wait);
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (data_to_read)
mask |= (EPOLLIN | EPOLLRDNORM);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
return mask;
}
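Because the read path only ever runs in process context, the lock guarding data_to_read can be a mutex, and the open-coded sleep keeps its usual shape: register on the wait queue, drop the lock across schedule() so the waker can take it, retake it, recheck. A self-contained sketch of that loop (simplified; the driver's real loop also handles O_NONBLOCK):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static DEFINE_MUTEX(data_mutex);
static DECLARE_WAIT_QUEUE_HEAD(data_wq);
static int data_ready;

static int wait_for_data(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int rv = 0;

	mutex_lock(&data_mutex);
	add_wait_queue(&data_wq, &wait);
	while (!data_ready && !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&data_mutex);	/* waker may need the mutex */
		schedule();
		mutex_lock(&data_mutex);
	}
	remove_wait_queue(&data_wq, &wait);
	__set_current_state(TASK_RUNNING);

	if (signal_pending(current))
		rv = -ERESTARTSYS;
	else
		data_ready = 0;			/* consume the event */
	mutex_unlock(&data_mutex);
	return rv;
}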
@@ -903,7 +865,6 @@ static const struct file_operations ipmi_wdog_fops = {
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
- .llseek = no_llseek,
};
static struct miscdevice ipmi_wdog_miscdev = {
@@ -933,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
if (atomic_inc_and_test(&preop_panic_excl))
panic("Watchdog pre-timeout");
} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
- unsigned long flags;
-
- spin_lock_irqsave(&ipmi_read_lock, flags);
+ mutex_lock(&ipmi_read_mutex);
data_to_read = 1;
wake_up_interruptible(&read_q);
kill_fasync(&fasync_q, SIGIO, POLL_IN);
- spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ mutex_unlock(&ipmi_read_mutex);
}
}
@@ -1065,7 +1024,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
static void ipmi_unregister_watchdog(int ipmi_intf)
{
- int rv;
struct ipmi_user *loc_user = watchdog_user;
if (!loc_user)
@@ -1090,9 +1048,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
mutex_lock(&ipmi_watchdog_mutex);
/* Disconnect from IPMI. */
- rv = ipmi_destroy_user(loc_user);
- if (rv)
- pr_warn("error unlinking from IPMI: %d\n", rv);
+ ipmi_destroy_user(loc_user);
/* If it comes back, restart it properly. */
ipmi_start_timer_on_heartbeat = 1;
@@ -1190,14 +1146,8 @@ static struct ipmi_smi_watcher smi_watcher = {
.smi_gone = ipmi_smi_gone
};
-static int action_op(const char *inval, char *outval)
+static int action_op_set_val(const char *inval)
{
- if (outval)
- strcpy(outval, action);
-
- if (!inval)
- return 0;
-
if (strcmp(inval, "reset") == 0)
action_val = WDOG_TIMEOUT_RESET;
else if (strcmp(inval, "none") == 0)
@@ -1208,18 +1158,26 @@ static int action_op(const char *inval, char *outval)
action_val = WDOG_TIMEOUT_POWER_DOWN;
else
return -EINVAL;
- strcpy(action, inval);
return 0;
}
-static int preaction_op(const char *inval, char *outval)
+static int action_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preaction);
+ strcpy(outval, action);
if (!inval)
return 0;
+ rv = action_op_set_val(inval);
+ if (!rv)
+ strcpy(action, inval);
+ return rv;
+}
+static int preaction_op_set_val(const char *inval)
+{
if (strcmp(inval, "pre_none") == 0)
preaction_val = WDOG_PRETIMEOUT_NONE;
else if (strcmp(inval, "pre_smi") == 0)
@@ -1232,18 +1190,26 @@ static int preaction_op(const char *inval, char *outval)
preaction_val = WDOG_PRETIMEOUT_MSG_INT;
else
return -EINVAL;
- strcpy(preaction, inval);
return 0;
}
-static int preop_op(const char *inval, char *outval)
+static int preaction_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preop);
+ strcpy(outval, preaction);
if (!inval)
return 0;
+ rv = preaction_op_set_val(inval);
+ if (!rv)
+ strcpy(preaction, inval);
+ return rv;
+}
+static int preop_op_set_val(const char *inval)
+{
if (strcmp(inval, "preop_none") == 0)
preop_val = WDOG_PREOP_NONE;
else if (strcmp(inval, "preop_panic") == 0)
@@ -1252,7 +1218,22 @@ static int preop_op(const char *inval, char *outval)
preop_val = WDOG_PREOP_GIVE_DATA;
else
return -EINVAL;
- strcpy(preop, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ int rv;
+
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ rv = preop_op_set_val(inval);
+ if (!rv)
+ strcpy(preop, inval);
- return 0;
+ return rv;
}
@@ -1289,18 +1270,18 @@ static int __init ipmi_wdog_init(void)
{
int rv;
- if (action_op(action, NULL)) {
+ if (action_op_set_val(action)) {
action_op("reset", NULL);
pr_info("Unknown action '%s', defaulting to reset\n", action);
}
- if (preaction_op(preaction, NULL)) {
+ if (preaction_op_set_val(preaction)) {
preaction_op("pre_none", NULL);
pr_info("Unknown preaction '%s', defaulting to none\n",
preaction);
}
- if (preop_op(preop, NULL)) {
+ if (preop_op_set_val(preop)) {
preop_op("preop_none", NULL);
pr_info("Unknown preop '%s', defaulting to none\n", preop);
}
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 72640da55380..a13a3470c17a 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -428,7 +428,7 @@ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask,
if (rc == -ETIMEDOUT)
mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
} else {
- del_timer(&priv->obe.timer);
+ timer_delete(&priv->obe.timer);
}
}
@@ -641,7 +641,7 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
return 0;
}
-static int aspeed_kcs_remove(struct platform_device *pdev)
+static void aspeed_kcs_remove(struct platform_device *pdev)
{
struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
@@ -655,9 +655,7 @@ static int aspeed_kcs_remove(struct platform_device *pdev)
spin_lock_irq(&priv->obe.lock);
priv->obe.remove = true;
spin_unlock_irq(&priv->obe.lock);
- del_timer_sync(&priv->obe.timer);
-
- return 0;
+ timer_delete_sync(&priv->obe.timer);
}
static const struct of_device_id ast_kcs_bmc_match[] = {
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
index 7961fec56476..4808a61bf273 100644
--- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -218,7 +218,7 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
return 0;
}
-static int npcm7xx_kcs_remove(struct platform_device *pdev)
+static void npcm7xx_kcs_remove(struct platform_device *pdev)
{
struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev);
struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
@@ -227,8 +227,6 @@ static int npcm7xx_kcs_remove(struct platform_device *pdev)
npcm7xx_kcs_enable_channel(kcs_bmc, false);
npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
-
- return 0;
}
static const struct of_device_id npcm_kcs_bmc_match[] = {
@@ -243,7 +241,7 @@ static struct platform_driver npcm_kcs_bmc_driver = {
.of_match_table = npcm_kcs_bmc_match,
},
.probe = npcm7xx_kcs_probe,
- .remove = npcm7xx_kcs_remove,
+ .remove = npcm7xx_kcs_remove,
};
module_platform_driver(npcm_kcs_bmc_driver);
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index 56346fb32872..7a52e3ea49ed 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t
unsigned long flags;
ssize_t ret;
- if (count > sizeof(struct ipmi_ssif_msg))
+ if (count < sizeof(msg.len) ||
+ count > sizeof(struct ipmi_ssif_msg))
return -EINVAL;
if (copy_from_user(&msg, buf, count))
return -EFAULT;
- if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
+ if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX ||
+ count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
return -EINVAL;
spin_lock_irqsave(&ssif_bmc->lock, flags);
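The hardened checks read in two stages: first prove that the copied bytes cover the len field at all, then prove that the claimed payload both fits the buffer and was actually supplied. The same ordering in isolation; structure layout and sizes here are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/types.h>

struct blob {
	u32 len;
	u8 payload[64];
};

static int validate_blob(const struct blob *b, size_t count)
{
	/* Stage 1: without this, b->len below reads uninitialized bytes. */
	if (count < sizeof(b->len) || count > sizeof(*b))
		return -EINVAL;

	/* Stage 2: len must be sane and covered by what was copied in. */
	if (!b->len || b->len > sizeof(b->payload) ||
	    count < sizeof(b->len) + b->len)
		return -EINVAL;

	return 0;
}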
@@ -207,7 +209,7 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t
if (ret)
goto exit;
- del_timer(&ssif_bmc->response_timer);
+ timer_delete(&ssif_bmc->response_timer);
ssif_bmc->response_timer_inited = false;
memcpy(&ssif_bmc->response, &msg, count);
@@ -290,13 +292,13 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
ssif_bmc->nbytes_processed = 0;
ssif_bmc->remain_len = 0;
ssif_bmc->busy = false;
- memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
wake_up_all(&ssif_bmc->wait_queue);
}
static void response_timeout(struct timer_list *t)
{
- struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer);
+ struct ssif_bmc_ctx *ssif_bmc = timer_container_of(ssif_bmc, t,
+ response_timer);
unsigned long flags;
spin_lock_irqsave(&ssif_bmc->lock, flags);
@@ -742,9 +744,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
ssif_bmc->aborting = true;
}
} else if (ssif_bmc->state == SSIF_RES_SENDING) {
- if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
/* Invalidate response buffer to denote it is sent */
complete_response(ssif_bmc);
+ }
ssif_bmc->state = SSIF_READY;
}
@@ -850,8 +854,8 @@ static const struct of_device_id ssif_bmc_match[] = {
MODULE_DEVICE_TABLE(of, ssif_bmc_match);
static const struct i2c_device_id ssif_bmc_id[] = {
- { DEVICE_NAME, 0 },
- { },
+ { DEVICE_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ssif_bmc_id);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 2f171d14b9b5..24417a00dfe9 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -1016,7 +1016,6 @@ static struct parport_driver lp_driver = {
.name = "lp",
.match_port = lp_attach,
.detach = lp_detach,
- .devmodel = true,
};
static int __init lp_init(void)
@@ -1123,4 +1122,5 @@ module_init(lp_init_module);
module_exit(lp_cleanup_module);
MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
+MODULE_DESCRIPTION("Generic parallel printer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 9b80e622ae80..52039fae1594 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- u64 from = ((u64)pfn) << PAGE_SHIFT;
- u64 to = from + size;
- u64 cursor = from;
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn))
- return 0;
- cursor += PAGE_SIZE;
- pfn++;
- }
- return 1;
-}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- return 1;
-}
#endif
static inline bool should_stop_iteration(void)
@@ -322,13 +304,13 @@ static unsigned zero_mmap_capabilities(struct file *file)
}
/* can't do an in-place private mapping if there's no MMU */
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
- return is_nommu_shared_mapping(vma->vm_flags);
+ return is_nommu_shared_mapping(desc->vm_flags);
}
#else
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
return 1;
}
@@ -340,49 +322,53 @@ static const struct vm_operations_struct mmap_mem_ops = {
#endif
};
-static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+static int mmap_filter_error(int err)
+{
+ return -EAGAIN;
+}
+
+static int mmap_mem_prepare(struct vm_area_desc *desc)
{
- size_t size = vma->vm_end - vma->vm_start;
- phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ struct file *file = desc->file;
+ const size_t size = vma_desc_size(desc);
+ const phys_addr_t offset = (phys_addr_t)desc->pgoff << PAGE_SHIFT;
/* Does it even fit in phys_addr_t? */
- if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ if (offset >> PAGE_SHIFT != desc->pgoff)
return -EINVAL;
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ if (!valid_mmap_phys_addr_range(desc->pgoff, size))
return -EINVAL;
- if (!private_mapping_ok(vma))
+ if (!private_mapping_ok(desc))
return -ENOSYS;
- if (!range_is_allowed(vma->vm_pgoff, size))
+ if (!range_is_allowed(desc->pgoff, size))
return -EPERM;
- if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
- &vma->vm_page_prot))
+ if (!phys_mem_access_prot_allowed(file, desc->pgoff, size,
+ &desc->page_prot))
return -EINVAL;
- vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
- size,
- vma->vm_page_prot);
+ desc->page_prot = phys_mem_access_prot(file, desc->pgoff,
+ size,
+ desc->page_prot);
- vma->vm_ops = &mmap_mem_ops;
+ desc->vm_ops = &mmap_mem_ops;
+
+ /* Remap-pfn-range will mark the range VM_IO. */
+ mmap_action_remap_full(desc, desc->pgoff);
+ /* We filter remap errors to -EAGAIN. */
+ desc->action.error_hook = mmap_filter_error;
- /* Remap-pfn-range will mark the range VM_IO */
- if (remap_pfn_range(vma,
- vma->vm_start,
- vma->vm_pgoff,
- size,
- vma->vm_page_prot)) {
- return -EAGAIN;
- }
return 0;
}
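For context: .mmap_prepare replaces the old .mmap hook for drivers such as this. It runs before the VMA exists, receives a struct vm_area_desc to adjust, and queues work (here the PFN remap) for the core to carry out afterwards. A stripped-down sketch of the shape used above, assuming the same desc API; demo_vm_ops is a hypothetical ops table:

static const struct vm_operations_struct demo_vm_ops;

static int demo_mmap_prepare(struct vm_area_desc *desc)
{
	const size_t size = vma_desc_size(desc);

	if (!valid_mmap_phys_addr_range(desc->pgoff, size))
		return -EINVAL;

	desc->vm_ops = &demo_vm_ops;

	/* Queue the remap; the core performs remap_pfn_range() itself
	 * once the VMA has been created, so the driver no longer does. */
	mmap_action_remap_full(desc, desc->pgoff);

	return 0;
}

static const struct file_operations demo_fops = {
	.mmap_prepare = demo_mmap_prepare,
};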
+#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -424,6 +410,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
*ppos = i;
return tmp-buf;
}
+#endif
static ssize_t read_null(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -517,38 +504,64 @@ static ssize_t read_zero(struct file *file, char __user *buf,
return cleared;
}
-static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+static int mmap_zero_private_success(const struct vm_area_struct *vma)
+{
+ /*
+ * This is an unusual situation: we mark a MAP_PRIVATE mapping of
+ * /dev/zero as anonymous, despite it not being one.
+ */
+ vma_set_anonymous((struct vm_area_struct *)vma);
+
+ return 0;
+}
+
+static int mmap_zero_prepare(struct vm_area_desc *desc)
{
#ifndef CONFIG_MMU
return -ENOSYS;
#endif
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- vma_set_anonymous(vma);
+ if (desc->vm_flags & VM_SHARED)
+ return shmem_zero_setup_desc(desc);
+
+ desc->action.success_hook = mmap_zero_private_success;
return 0;
}
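The action hooks seen in the two prepare handlers let a driver run code after the core completes (success_hook) or fails (error_hook) the queued mmap work; mem.c uses them to mark the private /dev/zero VMA anonymous and to normalize remap failures to -EAGAIN. A sketch assuming that hook API:

static int demo_success(const struct vm_area_struct *vma)
{
	/* Runs once the VMA has been created and set up. */
	return 0;
}

static int demo_error(int err)
{
	return -EAGAIN;		/* collapse any failure to -EAGAIN */
}

static int demo_prepare(struct vm_area_desc *desc)
{
	desc->action.success_hook = demo_success;
	desc->action.error_hook = demo_error;
	return 0;
}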
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return -ENOSYS;
+}
+#else
static unsigned long get_unmapped_area_zero(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
-#ifdef CONFIG_MMU
if (flags & MAP_SHARED) {
/*
- * mmap_zero() will call shmem_zero_setup() to create a file,
- * so use shmem's get_unmapped_area in case it can be huge;
- * and pass NULL for file as in mmap.c's get_unmapped_area(),
- * so as not to confuse shmem with our handle on "/dev/zero".
+ * mmap_zero_prepare() will call shmem_zero_setup() to create a
+ * file, so use shmem's get_unmapped_area in case it can be
+ * huge; and pass NULL for file as in mmap.c's
+ * get_unmapped_area(), so as not to confuse shmem with our
+ * handle on "/dev/zero".
*/
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
- /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ /*
+ * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+ * attempt to map aligned to huge page size if possible, otherwise we
+ * fall back to system page size mappings.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return -ENOSYS;
+ return mm_get_unmapped_area(file, addr, len, pgoff, flags);
#endif
}
+#endif /* CONFIG_MMU */
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -635,12 +648,13 @@ static const struct file_operations __maybe_unused mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
.write = write_mem,
- .mmap = mmap_mem,
+ .mmap_prepare = mmap_mem_prepare,
.open = open_mem,
#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem,
.mmap_capabilities = memory_mmap_capabilities,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static const struct file_operations null_fops = {
@@ -653,12 +667,14 @@ static const struct file_operations null_fops = {
.uring_cmd = uring_cmd_null,
};
-static const struct file_operations __maybe_unused port_fops = {
+#ifdef CONFIG_DEVPORT
+static const struct file_operations port_fops = {
.llseek = memory_lseek,
.read = read_port,
.write = write_port,
.open = open_port,
};
+#endif
static const struct file_operations zero_fops = {
.llseek = zero_lseek,
@@ -668,7 +684,7 @@ static const struct file_operations zero_fops = {
.write_iter = write_iter_zero,
.splice_read = copy_splice_read,
.splice_write = splice_write_zero,
- .mmap = mmap_zero,
+ .mmap_prepare = mmap_zero_prepare,
.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
@@ -689,7 +705,7 @@ static const struct memdev {
umode_t mode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
- [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 },
+ [DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 },
#endif
[3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_DEVPORT
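The FMODE_UNSIGNED_OFFSET removal in devlist pairs with the new fop_flags line in mem_fops above: the flag is now declared once on the file_operations table as FOP_UNSIGNED_OFFSET rather than being set on f_mode at every open. Sketch:

static const struct file_operations demo_fops = {
	.llseek		= default_llseek,
	/* Offsets with the sign bit set are valid for this file. */
	.fop_flags	= FOP_UNSIGNED_OFFSET,
};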
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 541edc26ec89..726516fb0a3b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -58,31 +58,27 @@ static LIST_HEAD(misc_list);
static DEFINE_MUTEX(misc_mtx);
/*
- * Assigned numbers, used for dynamic minors
+ * Assigned numbers.
*/
-#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DEFINE_IDA(misc_minors_ida);
-static int misc_minor_alloc(void)
+static int misc_minor_alloc(int minor)
{
- int ret;
+ int ret = 0;
- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
- if (ret >= 0) {
- ret = DYNAMIC_MINORS - ret - 1;
- } else {
+ if (minor == MISC_DYNAMIC_MINOR) {
+ /* allocate free id */
ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
MINORMASK, GFP_KERNEL);
+ } else {
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
}
return ret;
}
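After this change a single IDA covers the whole minor space. ida_alloc_range(ida, min, max, gfp) hands back a free ID within [min, max] or a negative errno (-ENOSPC when the range is exhausted), so passing the same value for both bounds reserves exactly one fixed minor. A minimal sketch with hypothetical names:

#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/miscdevice.h>

static DEFINE_IDA(demo_minor_ida);

static int demo_minor_alloc(int minor, bool dynamic)
{
	if (dynamic)	/* any free minor above the fixed space */
		return ida_alloc_range(&demo_minor_ida,
				       MISC_DYNAMIC_MINOR + 1, MINORMASK,
				       GFP_KERNEL);

	/* min == max: reserve exactly this minor or fail */
	return ida_alloc_range(&demo_minor_ida, minor, minor, GFP_KERNEL);
}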
static void misc_minor_free(int minor)
{
- if (minor < DYNAMIC_MINORS)
- ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1);
- else if (minor > MISC_DYNAMIC_MINOR)
- ida_free(&misc_minors_ida, minor);
+ ida_free(&misc_minors_ida, minor);
}
#ifdef CONFIG_PROC_FS
@@ -136,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file)
break;
}
- if (!new_fops) {
+ /* Only request a module for fixed minors */
+ if (!new_fops && minor < MISC_DYNAMIC_MINOR) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
@@ -148,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file)
new_fops = fops_get(iter->fops);
break;
}
- if (!new_fops)
- goto fail;
}
+ if (!new_fops)
+ goto fail;
+
/*
* Place the miscdevice in the file's
* private_data so it can be used by the
@@ -214,12 +212,18 @@ int misc_register(struct miscdevice *misc)
int err = 0;
bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+ if (misc->minor > MISC_DYNAMIC_MINOR) {
+ pr_err("Invalid fixed minor %d for miscdevice '%s'\n",
+ misc->minor, misc->name);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
if (is_dynamic) {
- int i = misc_minor_alloc();
+ int i = misc_minor_alloc(misc->minor);
if (i < 0) {
err = -EBUSY;
@@ -228,6 +232,7 @@ int misc_register(struct miscdevice *misc)
misc->minor = i;
} else {
struct miscdevice *c;
+ int i;
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
@@ -235,6 +240,12 @@ int misc_register(struct miscdevice *misc)
goto out;
}
}
+
+ i = misc_minor_alloc(misc->minor);
+ if (i < 0) {
+ err = -EBUSY;
+ goto out;
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
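The new guard encodes the minor-number layout: values below MISC_DYNAMIC_MINOR (255) are the caller-chosen fixed space, 255 itself is the "allocate one for me" sentinel, and dynamic allocations land strictly above it, up to MINORMASK. A small helper capturing that split, mirroring the test file's is_valid_dynamic_minor() below:

static bool demo_minor_is_dynamic(int minor)
{
	/* 0..254 fixed, 255 sentinel, 256..MINORMASK dynamic */
	return minor > MISC_DYNAMIC_MINOR;
}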
@@ -243,8 +254,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ misc_minor_free(misc->minor);
if (is_dynamic) {
- misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
@@ -272,13 +283,12 @@ EXPORT_SYMBOL(misc_register);
void misc_deregister(struct miscdevice *misc)
{
- if (WARN_ON(list_empty(&misc->list)))
- return;
-
mutex_lock(&misc_mtx);
- list_del(&misc->list);
+ list_del_init(&misc->list);
device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
+ if (misc->minor > MISC_DYNAMIC_MINOR)
+ misc->minor = MISC_DYNAMIC_MINOR;
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
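Note the switch from list_del() to list_del_init(): the latter re-initializes the removed entry to point at itself, so a later list_empty() check, or re-registering the same miscdevice, stays well defined; plain list_del() leaves poison pointers behind. Sketch:

#include <linux/list.h>

struct demo {
	struct list_head list;
};

static void demo_remove(struct demo *d)
{
	list_del_init(&d->list);	/* d->list is now a valid empty list */
}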
@@ -286,15 +296,15 @@ EXPORT_SYMBOL(misc_deregister);
static int __init misc_init(void)
{
int err;
- struct proc_dir_entry *ret;
+ struct proc_dir_entry *misc_proc_file;
- ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
+ misc_proc_file = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
err = class_register(&misc_class);
if (err)
goto fail_remove;
- err = -EIO;
- if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+ err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops);
+ if (err < 0)
goto fail_printk;
return 0;
@@ -302,7 +312,7 @@ fail_printk:
pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
class_unregister(&misc_class);
fail_remove:
- if (ret)
+ if (misc_proc_file)
remove_proc_entry("misc", NULL);
return err;
}
diff --git a/drivers/char/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c
new file mode 100644
index 000000000000..6fc8b05169c5
--- /dev/null
+++ b/drivers/char/misc_minor_kunit.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/init_syscalls.h>
+
+/* static minor (LCD_MINOR) */
+static struct miscdevice dev_static_minor = {
+ .minor = LCD_MINOR,
+ .name = "dev_static_minor",
+};
+
+/* misc dynamic minor */
+static struct miscdevice dev_misc_dynamic_minor = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dev_misc_dynamic_minor",
+};
+
+static void kunit_static_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_static_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor);
+ misc_deregister(&dev_static_minor);
+}
+
+static void kunit_misc_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_misc_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ misc_deregister(&dev_misc_dynamic_minor);
+}
+
+struct miscdev_test_case {
+ const char *str;
+ int minor;
+};
+
+static struct miscdev_test_case miscdev_test_ranges[] = {
+ {
+ .str = "lower static range, top",
+ .minor = 15,
+ },
+ {
+ .str = "upper static range, bottom",
+ .minor = 130,
+ },
+ {
+ .str = "lower static range, bottom",
+ .minor = 0,
+ },
+ {
+ .str = "upper static range, top",
+ .minor = MISC_DYNAMIC_MINOR - 1,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(miscdev, miscdev_test_ranges, str);
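For reference: KUNIT_ARRAY_PARAM_DESC() above generates a parameter generator named miscdev_gen_params from the array, using the str member as each case's description, and tests registered via KUNIT_CASE_PARAM() receive one element per run through test->param_value. A self-contained sketch of the same pattern (hypothetical test, in a file that already includes <kunit/test.h>):

struct demo_case {
	const char *str;
	int in, out;
};

static const struct demo_case demo_cases[] = {
	{ .str = "doubles one", .in = 1, .out = 2 },
	{ .str = "doubles two", .in = 2, .out = 4 },
};

KUNIT_ARRAY_PARAM_DESC(demo, demo_cases, str);

static void demo_test(struct kunit *test)
{
	const struct demo_case *p = test->param_value;

	KUNIT_EXPECT_EQ(test, p->in * 2, p->out);
}

/* Registered as: KUNIT_CASE_PARAM(demo_test, demo_gen_params) */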
+
+static int miscdev_find_minors(struct kunit_suite *suite)
+{
+ int ret;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ };
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[0].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = 128; i < MISC_DYNAMIC_MINOR; i++) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[1].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = 0; i < miscdev_test_ranges[0].minor; i++) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[2].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = MISC_DYNAMIC_MINOR - 1; i > miscdev_test_ranges[1].minor; i--) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[3].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ }
+
+ return ret;
+}
+
+static bool is_valid_dynamic_minor(int minor)
+{
+ if (minor < 0)
+ return false;
+ return minor > MISC_DYNAMIC_MINOR;
+}
+
+static int miscdev_test_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations miscdev_test_fops = {
+ .open = miscdev_test_open,
+};
+
+static void __init miscdev_test_can_open(struct kunit *test, struct miscdevice *misc)
+{
+ int ret;
+ struct file *filp;
+ char *devname;
+
+ devname = kasprintf(GFP_KERNEL, "/dev/%s", misc->name);
+ ret = init_mknod(devname, S_IFCHR | 0600,
+ new_encode_dev(MKDEV(MISC_MAJOR, misc->minor)));
+ if (ret != 0)
+ KUNIT_FAIL(test, "failed to create node\n");
+
+ filp = filp_open(devname, O_RDONLY, 0);
+ if (IS_ERR_OR_NULL(filp))
+ KUNIT_FAIL(test, "failed to open misc device: %ld\n", PTR_ERR(filp));
+ else
+ fput(filp);
+
+ init_unlink(devname);
+ kfree(devname);
+}
+
+static void __init miscdev_test_static_basic(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc_test.minor = params->minor;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+
+ if (ret == 0) {
+ miscdev_test_can_open(test, &misc_test);
+ misc_deregister(&misc_test);
+ }
+}
+
+static void __init miscdev_test_dynamic_basic(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc_test.minor));
+
+ if (ret == 0) {
+ miscdev_test_can_open(test, &misc_test);
+ misc_deregister(&misc_test);
+ }
+}
+
+static void miscdev_test_twice(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc_test.minor = params->minor;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+static void miscdev_test_duplicate_minor(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .name = "misc2",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc1.minor = params->minor;
+ misc2.minor = params->minor;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc1.minor, params->minor);
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ misc_deregister(&misc1);
+}
+
+static void miscdev_test_duplicate_name(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor));
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ misc_deregister(&misc1);
+}
+
+/*
+ * Test that after a duplicate-name failure, the reserved minor number
+ * is freed and becomes the next one to be allocated.
+ */
+static void miscdev_test_duplicate_name_leak(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc3 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc3",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ int dyn_minor;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor));
+
+ /*
+ * Find out the next available minor number.
+ */
+ ret = misc_register(&misc3);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor));
+ dyn_minor = misc3.minor;
+ misc_deregister(&misc3);
+ misc3.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ /*
+ * Now check that we can still get the same minor we found before.
+ */
+ ret = misc_register(&misc3);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor));
+ KUNIT_EXPECT_EQ(test, misc3.minor, dyn_minor);
+ misc_deregister(&misc3);
+
+ misc_deregister(&misc1);
+}
+
+/*
+ * Try to register a static minor with a duplicate name. The failed
+ * registration must still release the minor, or it could never be
+ * used again.
+ */
+static void miscdev_test_duplicate_error(struct kunit *test)
+{
+ struct miscdevice miscdyn = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "name1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "name1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscnew = {
+ .name = "name2",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ miscstat.minor = params->minor;
+ miscnew.minor = params->minor;
+
+ ret = misc_register(&miscdyn);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ ret = misc_register(&miscnew);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscnew.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&miscnew);
+
+ misc_deregister(&miscdyn);
+}
+
+static void __init miscdev_test_dynamic_only_range(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ const int dynamic_minors = 256;
+ int i;
+
+ miscdev = kunit_kmalloc_array(test, dynamic_minors,
+ sizeof(struct miscdevice),
+ GFP_KERNEL | __GFP_ZERO);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ /*
+ * This is the bug we are looking for!
+ * We asked for a dynamic minor and got one in the static range.
+ */
+ if (miscdev[i].minor >= 0 && miscdev[i].minor <= 15) {
+ KUNIT_FAIL(test, "misc_register allocated minor %d\n", miscdev[i].minor);
+ i++;
+ break;
+ }
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void __init miscdev_test_collision(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+ const int dynamic_minors = 256;
+ int i;
+
+ miscdev = kunit_kmalloc_array(test, dynamic_minors,
+ sizeof(struct miscdevice),
+ GFP_KERNEL | __GFP_ZERO);
+
+ miscstat.minor = miscdev_test_ranges[0].minor;
+ ret = misc_register(&miscstat);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+
+ misc_deregister(&miscstat);
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void __init miscdev_test_collision_reverse(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+ const int dynamic_minors = 256;
+ int i;
+
+ miscdev = kunit_kmalloc_array(test, dynamic_minors,
+ sizeof(struct miscdevice),
+ GFP_KERNEL | __GFP_ZERO);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ miscstat.minor = miscdev_test_ranges[0].minor;
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+}
+
+static void __init miscdev_test_conflict(struct kunit *test)
+{
+ int ret;
+ struct miscdevice miscdyn = {
+ .name = "miscdyn",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+
+ ret = misc_register(&miscdyn);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+
+ /*
+ * Try to register a static minor with the same minor as the
+ * dynamic one.
+ */
+ miscstat.minor = miscdyn.minor;
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ miscdev_test_can_open(test, &miscdyn);
+
+ misc_deregister(&miscdyn);
+}
+
+static void __init miscdev_test_conflict_reverse(struct kunit *test)
+{
+ int ret;
+ struct miscdevice miscdyn = {
+ .name = "miscdyn",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+
+ /*
+ * Find the first available dynamic minor to use it as a static
+ * minor later on.
+ */
+ ret = misc_register(&miscdyn);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+ miscstat.minor = miscdyn.minor;
+ misc_deregister(&miscdyn);
+
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ /*
+ * Try to register a dynamic minor after attempting to register a
+ * static minor within the dynamic range. The dynamic registration
+ * should work and, since the static one was rejected, end up with
+ * that same minor.
+ */
+ miscdyn.minor = MISC_DYNAMIC_MINOR;
+ ret = misc_register(&miscdyn);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+ if (ret == 0)
+ misc_deregister(&miscdyn);
+}
+
+/* A minor greater than MISC_DYNAMIC_MINOR is invalid when registering a miscdevice */
+static void miscdev_test_invalid_input(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR + 1,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+/*
+ * Verify that @miscdyn_a can be registered again, without being
+ * reinitialized, even if the minor it previously owned has since been
+ * taken by another miscdevice such as @miscdyn_b.
+ */
+static void miscdev_test_dynamic_reentry(struct kunit *test)
+{
+ struct miscdevice miscdyn_a = {
+ .name = "miscdyn_a",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscdyn_b = {
+ .name = "miscdyn_b",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ int ret, minor_a;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ minor_a = miscdyn_a.minor;
+ if (ret != 0)
+ return;
+ misc_deregister(&miscdyn_a);
+
+ ret = misc_register(&miscdyn_b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a);
+ if (ret != 0)
+ return;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor);
+ if (ret == 0)
+ misc_deregister(&miscdyn_a);
+
+ misc_deregister(&miscdyn_b);
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(kunit_static_minor),
+ KUNIT_CASE(kunit_misc_dynamic_minor),
+ KUNIT_CASE(miscdev_test_invalid_input),
+ KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
+ KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_duplicate_name),
+ KUNIT_CASE(miscdev_test_duplicate_name_leak),
+ KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_reentry),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "miscdev",
+ .suite_init = miscdev_find_minors,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+static struct kunit_case __refdata test_init_cases[] = {
+ KUNIT_CASE_PARAM(miscdev_test_static_basic, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_basic),
+ KUNIT_CASE(miscdev_test_dynamic_only_range),
+ KUNIT_CASE(miscdev_test_collision),
+ KUNIT_CASE(miscdev_test_collision_reverse),
+ KUNIT_CASE(miscdev_test_conflict),
+ KUNIT_CASE(miscdev_test_conflict_reverse),
+ {}
+};
+
+static struct kunit_suite test_init_suite = {
+ .name = "miscdev_init",
+ .suite_init = miscdev_find_minors,
+ .test_cases = test_init_cases,
+};
+kunit_test_init_section_suite(test_init_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vimal Agrawal");
+MODULE_AUTHOR("Thadeu Lima de Souza Cascardo <cascardo@igalia.com>");
+MODULE_DESCRIPTION("Test module for misc character devices");
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 4a8937f80570..90f93cefb21c 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "3780i: " fmt
+
#include <linux/kernel.h>
#include <linux/unistd.h>
#include <linux/delay.h>
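The pr_fmt() definition added above must come before the first #include so that printk.h picks it up; every pr_err()/pr_info() in the file is then prefixed automatically, which is what allows the hand-rolled PRINTK_* trace macros below to be deleted. Minimal sketch:

#define pr_fmt(fmt) "demo: " fmt

#include <linux/printk.h>

static void demo_fail(void)
{
	pr_err("broken\n");	/* logs "demo: broken" */
}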
@@ -75,18 +77,12 @@ unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
unsigned long flags;
unsigned short val;
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n",
- usDspBaseIO, ulMsaAddr);
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
val = InWordDsp(DSP_MsaDataDSISHigh);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val);
-
return val;
}
@@ -95,10 +91,6 @@ void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
{
unsigned long flags;
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n",
- usDspBaseIO, ulMsaAddr, usValue);
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
@@ -112,64 +104,18 @@ static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
DSP_ISA_SLAVE_CONTROL rSlaveControl;
DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
-
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n",
- usDspBaseIO, uIndex, ucValue);
-
MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n",
- MKBYTE(rSlaveControl));
-
rSlaveControl_Save = rSlaveControl;
rSlaveControl.ConfigMode = true;
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
- MKBYTE(rSlaveControl));
-
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
OutByteDsp(DSP_ConfigData, ucValue);
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n");
-
-
}
-#if 0
-unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
- unsigned uIndex)
-{
- DSP_ISA_SLAVE_CONTROL rSlaveControl;
- DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
- unsigned char ucValue;
-
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n",
- usDspBaseIO, uIndex);
-
- MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
- rSlaveControl_Save = rSlaveControl;
- rSlaveControl.ConfigMode = true;
- OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
- OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
- ucValue = InByteDsp(DSP_ConfigData);
- OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue);
-
-
- return ucValue;
-}
-#endif /* 0 */
-
-int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings,
unsigned short *pIrqMap,
unsigned short *pDmaMap)
{
@@ -191,25 +137,13 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
DSP_CLOCK_CONTROL_2 rClockControl2;
DSP_ISA_SLAVE_CONTROL rSlaveControl;
DSP_HBRIDGE_CONTROL rHBridgeControl;
- unsigned short ChipID = 0;
unsigned short tval;
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n",
- pSettings->bDSPEnabled);
-
-
if (!pSettings->bDSPEnabled) {
- PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" );
+ pr_err("%s: Error: DSP not enabled. Aborting.\n", __func__);
return -EIO;
}
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n",
- pSettings->bModemEnabled);
-
if (pSettings->bModemEnabled) {
rUartCfg1.Reserved = rUartCfg2.Reserved = 0;
rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow;
@@ -282,23 +216,10 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rSlaveControl.ConfigMode = false;
rSlaveControl.Reserved = 0;
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n",
- usDspBaseIO, DSP_IsaSlaveControl,
- usDspBaseIO + DSP_IsaSlaveControl);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveContrl %x\n",
- MKWORD(rSlaveControl));
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval);
-
-
for (i = 0; i < 11; i++)
udelay(2000);
@@ -307,10 +228,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval);
-
-
/* Program our general configuration registers */
WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1));
WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2));
@@ -331,10 +248,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rHBridgeControl.IoAutoInc = false;
rHBridgeControl.DiagnosticMode = false;
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
- DSP_HBridgeControl, MKWORD(rHBridgeControl));
-
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable));
@@ -342,24 +255,17 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2));
WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset));
- ChipID = ReadMsaCfg(DSP_ChipID);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n",
- ChipID);
+ ReadMsaCfg(DSP_ChipID);
return 0;
}
-int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_ISA_SLAVE_CONTROL rSlaveControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
-
rSlaveControl.ClockControl = 0;
rSlaveControl.SoftReset = true;
rSlaveControl.ConfigMode = false;
@@ -375,29 +281,20 @@ int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
udelay(5);
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n");
-
return 0;
}
-int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_BOOT_DOMAIN rBootDomain;
DSP_HBRIDGE_CONTROL rHBridgeControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n");
-
spin_lock_irqsave(&dsp_lock, flags);
/* Mask DSP to PC interrupt */
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
- MKWORD(rHBridgeControl));
-
rHBridgeControl.EnableDspInt = false;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
@@ -408,9 +305,6 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
rBootDomain.NMI = true;
rBootDomain.Reserved = 0;
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
- MKWORD(rBootDomain));
-
WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
/* Reset all the chiplets and then reactivate them */
@@ -419,24 +313,17 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
WriteMsaCfg(DSP_ChipReset,
(unsigned short) (~pSettings->usChipletEnable));
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n");
-
return 0;
}
-int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_BOOT_DOMAIN rBootDomain;
DSP_HBRIDGE_CONTROL rHBridgeControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n");
-
-
/* Transition the core to a running state */
rBootDomain.ResetCore = true;
rBootDomain.Halt = false;
@@ -459,15 +346,9 @@ int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
rHBridgeControl.EnableDspInt = true;
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
- MKWORD(rHBridgeControl));
-
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n");
-
return 0;
}
@@ -479,12 +360,6 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned short __user *pusBuffer = pvBuffer;
unsigned short val;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -499,17 +374,9 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
if(put_user(val, pusBuffer++))
return -EFAULT;
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore exit bRC=true\n");
-
return 0;
}
@@ -521,12 +388,6 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
unsigned short __user *pusBuffer = pvBuffer;
unsigned short val;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -541,17 +402,9 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
if(put_user(val, pusBuffer++))
return -EFAULT;
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n");
-
return 0;
}
@@ -562,12 +415,6 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -583,17 +430,9 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
OutWordDsp(DSP_MsaDataDSISHigh, val);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_WriteDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780D_WriteDStore exit bRC=true\n");
-
return 0;
}
@@ -604,10 +443,6 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
/*
* Set the initial MSA address. To convert from an instruction store
* address to an MSA address
@@ -631,17 +466,10 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
if(put_user(val_hi, pusBuffer++))
return -EFAULT;
- PRINTK_4(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n",
- uCount, val_lo, val_hi);
-
PaceMsaAccess(usDspBaseIO);
}
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore exit bRC=true\n");
-
return 0;
}
@@ -652,11 +480,6 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/*
* Set the initial MSA address. To convert from an instruction store
* address to an MSA address
@@ -680,17 +503,9 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
OutWordDsp(DSP_MsaDataDSISHigh, val_hi);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_4(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n",
- uCount, val_lo, val_hi);
-
PaceMsaAccess(usDspBaseIO);
-
}
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore exit bRC=true\n");
-
return 0;
}
@@ -700,12 +515,6 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
{
unsigned long flags;
DSP_HBRIDGE_CONTROL rHBridgeControl;
- unsigned short temp;
-
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
- usDspBaseIO, pusIPCSource);
/*
* Disable DSP to PC interrupts, read the interrupt register,
@@ -717,22 +526,11 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
*pusIPCSource = InWordDsp(DSP_Interrupt);
- temp = (unsigned short) ~(*pusIPCSource);
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
- *pusIPCSource, temp);
-
OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
rHBridgeControl.EnableDspInt = true;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
- *pusIPCSource);
-
return 0;
}
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 95164246afd1..53dafceb20e0 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -261,7 +261,7 @@ typedef struct {
* the only values maintained by the 3780i support layer are the saved UART
* registers.
*/
-typedef struct _DSP_3780I_CONFIG_SETTINGS {
+struct dsp_3780i_config_settings {
/* Location of base configuration register */
unsigned short usBaseConfigIO;
@@ -313,16 +313,16 @@ typedef struct _DSP_3780I_CONFIG_SETTINGS {
unsigned char ucSCR; /* Scratch register */
unsigned char ucDLL; /* Divisor latch, low byte */
unsigned char ucDLM; /* Divisor latch, high byte */
-} DSP_3780I_CONFIG_SETTINGS;
+};
/* 3780i support functions */
-int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings,
unsigned short *pIrqMap,
unsigned short *pDmaMap);
-int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings);
-int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings);
-int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings);
+int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings);
+int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings);
int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned uCount, unsigned long ulDSPAddr);
int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
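The header conversions follow the kernel coding-style rule that new code should not hide struct types behind typedefs (Documentation/process/coding-style.rst, chapter 5); callers spell out the struct tag. Schematically:

/*
 * Before (typedef style):
 *	typedef struct _DSP_3780I_CONFIG_SETTINGS { ... } DSP_3780I_CONFIG_SETTINGS;
 *	int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS *pSettings);
 *
 * After (kernel style):
 */
struct dsp_3780i_config_settings;	/* plain struct tag */
int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings);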
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
index a24fe96e3c96..e56c1a375535 100644
--- a/drivers/char/mwave/Makefile
+++ b/drivers/char/mwave/Makefile
@@ -8,9 +8,3 @@
obj-$(CONFIG_MWAVE) += mwave.o
mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
-
-# To have the mwave driver disable other uarts if necessary
-# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
-
-# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
index c2a58f428bc8..6224aa814c62 100644
--- a/drivers/char/mwave/README
+++ b/drivers/char/mwave/README
@@ -4,16 +4,6 @@ Module options
The mwave module takes the following options. Note that these options
are not saved by the BIOS and so do not persist after unload and reload.
- mwave_debug=value, where value is bitwise OR of trace flags:
- 0x0001 mwavedd api tracing
- 0x0002 smapi api tracing
- 0x0004 3780i tracing
- 0x0008 tp3780i tracing
-
- Tracing only occurs if the driver has been compiled with the
- MW_TRACE macro #defined (i.e. let ccflags-y := -DMW_TRACE
- in the Makefile).
-
mwave_3780i_irq=5/7/10/11/15
If the dsp irq has not been setup and stored in bios by the
thinkpad configuration utility then this parameter allows the
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 11272d605ecd..640a9cb0dd8d 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "mwavedd: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -75,131 +77,62 @@ MODULE_LICENSE("GPL");
* We'll depend on users using the tpctl utility to do that for now
*/
static DEFINE_MUTEX(mwave_mutex);
-int mwave_debug = 0;
int mwave_3780i_irq = 0;
int mwave_3780i_io = 0;
int mwave_uart_irq = 0;
int mwave_uart_io = 0;
-module_param(mwave_debug, int, 0);
module_param_hw(mwave_3780i_irq, int, irq, 0);
module_param_hw(mwave_3780i_io, int, ioport, 0);
module_param_hw(mwave_uart_irq, int, irq, 0);
module_param_hw(mwave_uart_io, int, ioport, 0);
-static int mwave_open(struct inode *inode, struct file *file);
-static int mwave_close(struct inode *inode, struct file *file);
-static long mwave_ioctl(struct file *filp, unsigned int iocmd,
- unsigned long ioarg);
-
-MWAVE_DEVICE_DATA mwave_s_mdd;
-
-static int mwave_open(struct inode *inode, struct file *file)
-{
- unsigned int retval = 0;
-
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_open, entry inode %p file %p\n",
- inode, file);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_open, exit return retval %x\n", retval);
-
- return retval;
-}
-
-static int mwave_close(struct inode *inode, struct file *file)
-{
- unsigned int retval = 0;
-
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_close, entry inode %p file %p\n",
- inode, file);
-
- PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
- retval);
-
- return retval;
-}
+struct mwave_device_data mwave_s_mdd;
static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned long ioarg)
{
unsigned int retval = 0;
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
void __user *arg = (void __user *)ioarg;
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
- file, iocmd, (int) ioarg);
-
switch (iocmd) {
case IOCTL_MW_RESET:
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
- " calling tp3780I_ResetDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_ResetDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
- " retval %x from tp3780I_ResetDSP\n",
- retval);
break;
case IOCTL_MW_RUN:
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
- " calling tp3780I_StartDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_StartDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
- " retval %x from tp3780I_StartDSP\n",
- retval);
break;
case IOCTL_MW_DSP_ABILITIES: {
- MW_ABILITIES rAbilities;
+ struct mw_abilities rAbilities;
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl,"
- " IOCTL_MW_DSP_ABILITIES calling"
- " tp3780I_QueryAbilities\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
&rAbilities);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
- " retval %x from tp3780I_QueryAbilities\n",
- retval);
if (retval == 0) {
- if( copy_to_user(arg, &rAbilities,
- sizeof(MW_ABILITIES)) )
+ if (copy_to_user(arg, &rAbilities, sizeof(rAbilities)))
return -EFAULT;
}
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
- " exit retval %x\n",
- retval);
}
break;
case IOCTL_MW_READ_DATA:
case IOCTL_MW_READCLEAR_DATA: {
- MW_READWRITE rReadData;
+ struct mw_readwrite rReadData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rReadData, arg,
- sizeof(MW_READWRITE)) )
+ sizeof(struct mw_readwrite)) )
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rReadData.ulDataLength, ioarg, pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd,
@@ -211,19 +144,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_READ_INST: {
- MW_READWRITE rReadData;
+ struct mw_readwrite rReadData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rReadData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rReadData, arg, sizeof(rReadData)))
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rReadData.ulDataLength / 2, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -234,19 +161,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_WRITE_DATA: {
- MW_READWRITE rWriteData;
+ struct mw_readwrite rWriteData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rWriteData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rWriteData, arg, sizeof(rWriteData)))
return -EFAULT;
pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rWriteData.ulDataLength, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -257,19 +178,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_WRITE_INST: {
- MW_READWRITE rWriteData;
+ struct mw_readwrite rWriteData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rWriteData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rWriteData, arg, sizeof(rWriteData)))
return -EFAULT;
pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rWriteData.ulDataLength, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -283,30 +198,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_REGISTER_IPC:"
- " Error: Invalid ipcnum %x\n",
- ipcnum);
+ pr_err("%s: IOCTL_MW_REGISTER_IPC: Error: Invalid ipcnum %x\n",
+ __func__, ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x entry usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
pDrvData->IPCs[ipcnum].bIsHere = false;
pDrvData->IPCs[ipcnum].bIsEnabled = true;
mutex_unlock(&mwave_mutex);
-
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x exit\n",
- ipcnum);
}
break;
@@ -314,28 +216,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_GET_IPC: Error:"
- " Invalid ipcnum %x\n", ipcnum);
+ pr_err("%s: IOCTL_MW_GET_IPC: Error: Invalid ipcnum %x\n", __func__,
+ ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
- " ipcnum %x, usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
-
+
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
DECLARE_WAITQUEUE(wait, current);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, thread for"
- " ipc %x going to sleep\n",
- ipcnum);
add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
pDrvData->IPCs[ipcnum].bIsHere = true;
set_current_state(TASK_INTERRUPTIBLE);
@@ -343,31 +234,15 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
/* the interrupt handler while we were gone */
if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */
pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl"
- " IOCTL_MW_GET_IPC ipcnum %x"
- " handling first int\n",
- ipcnum);
} else { /* either 1st int has not yet occurred, or we have already handled the first int */
schedule();
if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
pDrvData->IPCs[ipcnum].usIntCount = 2;
}
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl"
- " IOCTL_MW_GET_IPC ipcnum %x"
- " woke up and returning to"
- " application\n",
- ipcnum);
}
pDrvData->IPCs[ipcnum].bIsHere = false;
remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
set_current_state(TASK_RUNNING);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
- " returning thread for ipc %x"
- " processing\n",
- ipcnum);
}
mutex_unlock(&mwave_mutex);
}
@@ -376,16 +251,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
case IOCTL_MW_UNREGISTER_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
- " ipcnum %x\n",
- ipcnum);
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_UNREGISTER_IPC:"
- " Error: Invalid ipcnum %x\n",
- ipcnum);
+ pr_err("%s: IOCTL_MW_UNREGISTER_IPC: Error: Invalid ipcnum %x\n",
+ __func__, ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
@@ -405,35 +273,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
return -ENOTTY;
} /* switch */
- PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
-
return retval;
}
-
-static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
- loff_t * ppos)
-{
- PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
- file, buf, count, ppos);
-
- return -EINVAL;
-}
-
-
-static ssize_t mwave_write(struct file *file, const char __user *buf,
- size_t count, loff_t * ppos)
-{
- PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_write entry file %p, buf %p,"
- " count %zx ppos %p\n",
- file, buf, count, ppos);
-
- return -EINVAL;
-}
-
-
static int register_serial_portandirq(unsigned int port, int irq)
{
struct uart_8250_port uart;
@@ -446,9 +288,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
/* OK */
break;
default:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::register_serial_portandirq:"
- " Error: Illegal port %x\n", port );
+ pr_err("%s: Error: Illegal port %x\n", __func__, port);
return -1;
} /* switch */
/* port is okay */
@@ -461,9 +301,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
/* OK */
break;
default:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::register_serial_portandirq:"
- " Error: Illegal irq %x\n", irq );
+ pr_err("%s: Error: Illegal irq %x\n", __func__, irq);
return -1;
} /* switch */
/* irq is okay */
@@ -478,56 +316,14 @@ static int register_serial_portandirq(unsigned int port, int irq)
return serial8250_register_8250_port(&uart);
}
-
static const struct file_operations mwave_fops = {
.owner = THIS_MODULE,
- .read = mwave_read,
- .write = mwave_write,
.unlocked_ioctl = mwave_ioctl,
- .open = mwave_open,
- .release = mwave_close,
.llseek = default_llseek,
};
-
static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
-#if 0 /* totally b0rked */
-/*
- * sysfs support <paulsch@us.ibm.com>
- */
-
-struct device mwave_device;
-
-/* Prevent code redundancy, create a macro for mwave_show_* functions. */
-#define mwave_show_function(attr_name, format_string, field) \
-static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- DSP_3780I_CONFIG_SETTINGS *pSettings = \
- &mwave_s_mdd.rBDData.rDspSettings; \
- return sprintf(buf, format_string, pSettings->field); \
-}
-
-/* All of our attributes are read attributes. */
-#define mwave_dev_rd_attr(attr_name, format_string, field) \
- mwave_show_function(attr_name, format_string, field) \
-static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
-
-mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
-mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
-mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
-mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
-mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
-
-static struct device_attribute * const mwave_dev_attrs[] = {
- &dev_attr_3780i_dma,
- &dev_attr_3780i_irq,
- &dev_attr_3780i_io,
- &dev_attr_uart_irq,
- &dev_attr_uart_io,
-};
-#endif
-
/*
* mwave_init is called on module load
*
@@ -536,20 +332,7 @@ static struct device_attribute * const mwave_dev_attrs[] = {
*/
static void mwave_exit(void)
{
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
-
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
-
-#if 0
- for (i = 0; i < pDrvData->nr_registered_attrs; i++)
- device_remove_file(&mwave_device, mwave_dev_attrs[i]);
- pDrvData->nr_registered_attrs = 0;
-
- if (pDrvData->device_registered) {
- device_unregister(&mwave_device);
- pDrvData->device_registered = false;
- }
-#endif
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
if ( pDrvData->sLine >= 0 ) {
serial8250_unregister_port(pDrvData->sLine);
@@ -566,8 +349,6 @@ static void mwave_exit(void)
if (pDrvData->bBDInitialized) {
tp3780I_Cleanup(&pDrvData->rBDData);
}
-
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
}
module_exit(mwave_exit);
@@ -576,11 +357,9 @@ static int __init mwave_init(void)
{
int i;
int retval = 0;
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");
-
- memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
+ memset(&mwave_s_mdd, 0, sizeof(mwave_s_mdd));
pDrvData->bBDInitialized = false;
pDrvData->bResourcesClaimed = false;
@@ -597,60 +376,34 @@ static int __init mwave_init(void)
}
retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_init: Error:"
- " Failed to initialize board data\n");
+ pr_err("%s: Error: Failed to initialize board data\n", __func__);
goto cleanup_error;
}
pDrvData->bBDInitialized = true;
retval = tp3780I_CalcResources(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_CalcResources"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to calculate resources\n");
+ pr_err("%s: Error: Failed to calculate resources\n", __func__);
goto cleanup_error;
}
retval = tp3780I_ClaimResources(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_ClaimResources"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to claim resources\n");
+ pr_err("%s: Error: Failed to claim resources\n", __func__);
goto cleanup_error;
}
pDrvData->bResourcesClaimed = true;
retval = tp3780I_EnableDSP(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_EnableDSP"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to enable DSP\n");
+ pr_err("%s: Error: Failed to enable DSP\n", __func__);
goto cleanup_error;
}
pDrvData->bDSPEnabled = true;
if (misc_register(&mwave_misc_dev) < 0) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to register misc device\n");
+ pr_err("%s: Error: Failed to register misc device\n", __func__);
goto cleanup_error;
}
pDrvData->bMwaveDevRegistered = true;
@@ -660,40 +413,16 @@ static int __init mwave_init(void)
pDrvData->rBDData.rDspSettings.usUartIrq
);
if (pDrvData->sLine < 0) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to register serial driver\n");
+ pr_err("%s: Error: Failed to register serial driver\n", __func__);
goto cleanup_error;
}
/* uart is registered */
-#if 0
- /* sysfs */
- memset(&mwave_device, 0, sizeof (struct device));
- dev_set_name(&mwave_device, "mwave");
-
- if (device_register(&mwave_device))
- goto cleanup_error;
- pDrvData->device_registered = true;
- for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
- if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to create sysfs file %s\n",
- mwave_dev_attrs[i]->attr.name);
- goto cleanup_error;
- }
- pDrvData->nr_registered_attrs++;
- }
-#endif
-
/* SUCCESS! */
return 0;
cleanup_error:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_init: Error:"
- " Failed to initialize\n");
+ pr_err("%s: Error: Failed to initialize\n", __func__);
mwave_exit(); /* clean up */
return -EIO;
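
The PRINTK_1..PRINTK_8 trace macros and the PRINTK_ERROR/KERN_ERR_MWAVE pair collapse throughout this series into plain pr_err() with a per-file pr_fmt() prefix, and __func__ replaces the hand-written "file::function" tags. A minimal sketch of the resulting pattern; the "mydrv" prefix and do_probe() are made up for illustration:

#define pr_fmt(fmt) "mydrv: " fmt       /* must precede the printk.h include */

#include <linux/printk.h>

static int do_probe(int port)
{
        if (port < 0) {
                /* emits: "mydrv: do_probe: Error: bad port -1" */
                pr_err("%s: Error: bad port %d\n", __func__, port);
                return -EINVAL;
        }
        return 0;
}
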
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
index 21cb09c7bed7..e1da1493eec5 100644
--- a/drivers/char/mwave/mwavedd.h
+++ b/drivers/char/mwave/mwavedd.h
@@ -56,97 +56,35 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
-extern int mwave_debug;
extern int mwave_3780i_irq;
extern int mwave_3780i_io;
extern int mwave_uart_irq;
extern int mwave_uart_io;
-#define PRINTK_ERROR printk
-#define KERN_ERR_MWAVE KERN_ERR "mwave: "
-
-#define TRACE_MWAVE 0x0001
-#define TRACE_SMAPI 0x0002
-#define TRACE_3780I 0x0004
-#define TRACE_TP3780I 0x0008
-
-#ifdef MW_TRACE
-#define PRINTK_1(f,s) \
- if (f & (mwave_debug)) { \
- printk(s); \
- }
-
-#define PRINTK_2(f,s,v1) \
- if (f & (mwave_debug)) { \
- printk(s,v1); \
- }
-
-#define PRINTK_3(f,s,v1,v2) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2); \
- }
-
-#define PRINTK_4(f,s,v1,v2,v3) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3); \
- }
-
-#define PRINTK_5(f,s,v1,v2,v3,v4) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4); \
- }
-
-#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5); \
- }
-
-#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5,v6); \
- }
-
-#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5,v6,v7); \
- }
-
-#else
-#define PRINTK_1(f,s)
-#define PRINTK_2(f,s,v1)
-#define PRINTK_3(f,s,v1,v2)
-#define PRINTK_4(f,s,v1,v2,v3)
-#define PRINTK_5(f,s,v1,v2,v3,v4)
-#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
-#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
-#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
-#endif
-
-
-typedef struct _MWAVE_IPC {
+struct mwave_ipc {
unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
bool bIsEnabled;
bool bIsHere;
/* entry spin lock */
wait_queue_head_t ipc_wait_queue;
-} MWAVE_IPC;
+};
-typedef struct _MWAVE_DEVICE_DATA {
- THINKPAD_BD_DATA rBDData; /* board driver's data area */
+struct mwave_device_data {
+ struct thinkpad_bd_data rBDData; /* board driver's data area */
unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
bool bBDInitialized;
bool bResourcesClaimed;
bool bDSPEnabled;
bool bDSPReset;
- MWAVE_IPC IPCs[16];
+ struct mwave_ipc IPCs[16];
bool bMwaveDevRegistered;
short sLine;
int nr_registered_attrs;
int device_registered;
-} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+};
-extern MWAVE_DEVICE_DATA mwave_s_mdd;
+extern struct mwave_device_data mwave_s_mdd;
#endif
diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h
index 60c961ae23b4..280327bdaa38 100644
--- a/drivers/char/mwave/mwavepub.h
+++ b/drivers/char/mwave/mwavepub.h
@@ -53,7 +53,7 @@
#include <linux/miscdevice.h>
-typedef struct _MW_ABILITIES {
+struct mw_abilities {
unsigned long instr_per_sec;
unsigned long data_size;
unsigned long inst_size;
@@ -63,27 +63,27 @@ typedef struct _MW_ABILITIES {
unsigned long component_list[7];
char mwave_os_name[16];
char bios_task_name[16];
-} MW_ABILITIES, *pMW_ABILITIES;
+};
-typedef struct _MW_READWRITE {
+struct mw_readwrite {
unsigned short usDspAddress; /* The dsp address */
unsigned long ulDataLength; /* The size in bytes of the data or user buffer */
void __user *pBuf; /* Input:variable sized buffer */
-} MW_READWRITE, *pMW_READWRITE;
+};
#define IOCTL_MW_RESET _IO(MWAVE_MINOR,1)
#define IOCTL_MW_RUN _IO(MWAVE_MINOR,2)
-#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES)
-#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE)
-#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE)
-#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE)
-#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE)
-#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,struct mw_abilities)
+#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,struct mw_readwrite)
+#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,struct mw_readwrite)
+#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,struct mw_readwrite)
+#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,struct mw_readwrite)
+#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,struct mw_readwrite)
#define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int)
#define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int)
#define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int)
-#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE)
+#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,struct mw_readwrite)
#endif
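
Swapping the MW_ABILITIES/MW_READWRITE typedefs for struct tags inside _IOR()/_IOW() leaves the ioctl numbers unchanged: the macros encode only direction, magic, number and sizeof(type), and the struct layouts are untouched, so the user-space ABI stays stable. A toy illustration of what the macro packs; the 'M' magic and the struct are stand-ins, not the driver's values:

#include <linux/ioctl.h>

struct example_rw {                     /* illustrative stand-in */
        unsigned short addr;
        unsigned long len;
        void *buf;
};

/* packs: read direction | sizeof(struct example_rw) | magic 'M' | nr 4 */
#define EXAMPLE_READ_DATA _IOR('M', 4, struct example_rw)
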
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index f8d79d393b69..df6354b24339 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "smapi: " fmt
+
#include <linux/kernel.h>
#include <linux/mc146818rtc.h> /* CMOS defines */
#include "smapi.h"
@@ -69,10 +71,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
unsigned int inBXCX = (inBX << 16) | inCX;
unsigned int inDISI = (inDI << 16) | inSI;
- int retval = 0;
-
- PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
- inBX, inCX, inDI, inSI);
__asm__ __volatile__("movw $0x5380,%%ax\n\t"
"movl %7,%%ebx\n\t"
@@ -107,10 +105,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
:"%eax", "%ebx", "%ecx", "%edx", "%edi",
"%esi");
- PRINTK_8(TRACE_SMAPI,
- "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
- myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
- usSmapiOK);
*outAX = myoutAX;
*outBX = myoutBX;
*outCX = myoutCX;
@@ -118,13 +112,11 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
*outDI = myoutDI;
*outSI = myoutSI;
- retval = (usSmapiOK == 1) ? 0 : -EIO;
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
- return retval;
+ return usSmapiOK == 1 ? 0 : -EIO;
}
-int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
+int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings)
{
int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
@@ -134,17 +126,13 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
static const unsigned short ausUartBases[] = {
0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
-
bRC = smapi_request(0x1802, 0x0000, 0, 0,
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
if (bRC) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n");
+ pr_err("%s: Error: Could not get DSP Settings. Aborting.\n", __func__);
return bRC;
}
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
-
pSettings->bDSPPresent = ((usBX & 0x0100) != 0);
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
@@ -154,27 +142,20 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
} else {
pSettings->usDspBaseIO = 0;
}
- PRINTK_6(TRACE_SMAPI,
- "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n",
- pSettings->bDSPPresent, pSettings->bDSPEnabled,
- pSettings->usDspIRQ, pSettings->usDspDMA,
- pSettings->usDspBaseIO);
/* check for illegal values */
if ( pSettings->usDspBaseIO == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n");
+ pr_err("%s: Worry: DSP base I/O address is 0\n", __func__);
if ( pSettings->usDspIRQ == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n");
+ pr_err("%s: Worry: DSP IRQ line is 0\n", __func__);
bRC = smapi_request(0x1804, 0x0000, 0, 0,
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
if (bRC) {
- PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n");
+ pr_err("%s: Error: Could not get DSP modem settings. Aborting.\n", __func__);
return bRC;
}
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
-
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
@@ -183,19 +164,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->usUartBaseIO = 0;
}
- PRINTK_4(TRACE_SMAPI,
- "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n",
- pSettings->bModemEnabled,
- pSettings->usUartIRQ,
- pSettings->usUartBaseIO);
-
/* check for illegal values */
if ( pSettings->usUartBaseIO == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n");
+ pr_err("%s: Worry: UART base I/O address is 0\n", __func__);
if ( pSettings->usUartIRQ == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n");
-
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC);
+ pr_err("%s: Worry: UART IRQ line is 0\n", __func__);
return bRC;
}
@@ -218,17 +191,14 @@ int smapi_set_DSP_cfg(void)
unsigned short dspio_index = 0, uartio_index = 0;
- PRINTK_5(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n",
- mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
-
if (mwave_3780i_io) {
for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
if (i == ARRAY_SIZE(ausDspBases)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
+ pr_err("%s: Error: Invalid mwave_3780i_io address %x. Aborting.\n",
+ __func__, mwave_3780i_io);
return bRC;
}
dspio_index = i;
@@ -240,7 +210,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausDspIrqs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
+ pr_err("%s: Error: Invalid mwave_3780i_irq %x. Aborting.\n", __func__,
+ mwave_3780i_irq);
return bRC;
}
}
@@ -251,7 +222,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausUartBases)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
+ pr_err("%s: Error: Invalid mwave_uart_io address %x. Aborting.\n", __func__,
+ mwave_uart_io);
return bRC;
}
uartio_index = i;
@@ -264,7 +236,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausUartIrqs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
+ pr_err("%s: Error: Invalid mwave_uart_irq %x. Aborting.\n", __func__,
+ mwave_uart_irq);
return bRC;
}
}
@@ -279,46 +252,15 @@ int smapi_set_DSP_cfg(void)
if (usBX & 0x0100) { /* serial port A is present */
if (usCX & 1) { /* serial port is enabled */
if ((usSI & 0xFF) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n");
- bRC = smapi_request(0x1403, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1402, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port A irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usSI & 0xFF, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI >> 8) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n");
- bRC = smapi_request (0x1403, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request (0x1402, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI >> 8],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -332,46 +274,15 @@ int smapi_set_DSP_cfg(void)
if (usBX & 0x0100) { /* serial port B is present */
if (usCX & 1) { /* serial port is enabled */
if ((usSI & 0xFF) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
- bRC = smapi_request(0x1405, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1404, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port B irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usSI & 0xFF, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI >> 8) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1 (TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
- bRC = smapi_request (0x1405, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request (0x1404, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI >> 8],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -387,58 +298,15 @@ int smapi_set_DSP_cfg(void)
/* bRC == 0 */
if ((usCX & 0xff) != 0xff) { /* IR port not disabled */
if ((usCX & 0xff) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
- bRC = smapi_request(0x1701, 0x0100, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1700, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1704, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: IR port irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usCX & 0xff, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI & 0xff) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
- bRC = smapi_request(0x1701, 0x0100, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1700, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1704, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: IR port base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI & 0xff],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -482,7 +350,6 @@ int smapi_set_DSP_cfg(void)
if (bRC) goto exit_smapi_request_error;
/* normal exit: */
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n");
return 0;
exit_conflict:
@@ -490,64 +357,32 @@ exit_conflict:
return -EIO;
exit_smapi_request_error:
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC);
+ pr_err("%s: exit on smapi_request error bRC %x\n", __func__, bRC);
return bRC;
}
int smapi_set_DSP_power_state(bool bOn)
{
- int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
unsigned short usPowerFunction;
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn);
-
usPowerFunction = (bOn) ? 1 : 0;
- bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
-
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC);
-
- return bRC;
+ return smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI,
+ &usSI);
}
-#if 0
-static int SmapiQuerySystemID(void)
-{
- int bRC = -EIO;
- unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff,
- usDX = 0xffff, usDI = 0xffff, usSI = 0xffff;
-
- printk("smapi::SmapiQUerySystemID entry\n");
- bRC = smapi_request(0x0000, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
-
- if (bRC == 0) {
- printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n",
- usAX, usBX, usCX, usDX, usDI, usSI);
- } else {
- printk("smapi::SmapiQuerySystemID smapi_request error\n");
- }
-
- return bRC;
-}
-#endif /* 0 */
-
int smapi_init(void)
{
int retval = -EIO;
unsigned short usSmapiID = 0;
unsigned long flags;
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n");
-
spin_lock_irqsave(&rtc_lock, flags);
usSmapiID = CMOS_READ(0x7C);
usSmapiID |= (CMOS_READ(0x7D) << 8);
spin_unlock_irqrestore(&rtc_lock, flags);
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID);
if (usSmapiID == 0x5349) {
spin_lock_irqsave(&rtc_lock, flags);
@@ -555,16 +390,13 @@ int smapi_init(void)
g_usSmapiPort |= (CMOS_READ(0x7F) << 8);
spin_unlock_irqrestore(&rtc_lock, flags);
if (g_usSmapiPort == 0) {
- PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n");
+ pr_err("%s: ERROR unable to read from SMAPI port\n", __func__);
} else {
- PRINTK_2(TRACE_SMAPI,
- "smapi::smapi_init, exit true g_usSmapiPort %x\n",
- g_usSmapiPort);
retval = 0;
//SmapiQuerySystemID();
}
} else {
- PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n");
+ pr_err("%s: ERROR invalid usSmapiID\n", __func__);
retval = -ENXIO;
}
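
smapi_init() probes for SMAPI by reading a 16-bit signature (0x5349) from CMOS bytes 0x7C/0x7D and the SMAPI port from the next pair, all under rtc_lock. A condensed sketch of that handshake; the 0x7E low-byte read is an assumption, since the hunk only shows the 0x7F half:

#include <linux/mc146818rtc.h>          /* CMOS_READ(), rtc_lock */
#include <linux/spinlock.h>

static int read_smapi_port(unsigned short *port)
{
        unsigned short id;
        unsigned long flags;

        spin_lock_irqsave(&rtc_lock, flags);
        id = CMOS_READ(0x7C) | (CMOS_READ(0x7D) << 8);
        *port = CMOS_READ(0x7E) | (CMOS_READ(0x7F) << 8);   /* 0x7E assumed */
        spin_unlock_irqrestore(&rtc_lock, flags);

        if (id != 0x5349 || *port == 0)
                return -ENXIO;
        return 0;
}
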
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
index ebc206b000b9..e605b16ed23c 100644
--- a/drivers/char/mwave/smapi.h
+++ b/drivers/char/mwave/smapi.h
@@ -49,7 +49,7 @@
#ifndef _LINUX_SMAPI_H
#define _LINUX_SMAPI_H
-typedef struct {
+struct smapi_dsp_settings {
int bDSPPresent;
int bDSPEnabled;
int bModemEnabled;
@@ -65,10 +65,10 @@ typedef struct {
unsigned short usSndblstIRQ;
unsigned short usSndblstDMA;
unsigned short usSndblstBaseIO;
-} SMAPI_DSP_SETTINGS;
+};
int smapi_init(void);
-int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
+int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings);
int smapi_set_DSP_cfg(void);
int smapi_set_DSP_power_state(bool bOn);
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
index 83eaffeb22c8..7363b0f764e0 100644
--- a/drivers/char/mwave/tp3780i.c
+++ b/drivers/char/mwave/tp3780i.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "tp3780i: " fmt
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -65,16 +67,14 @@ static unsigned short s_ausThinkpadDmaToField[8] =
static unsigned short s_numIrqs = 16, s_numDmas = 8;
-static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
+static void EnableSRAM(struct thinkpad_bd_data *pBDData)
{
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData;
DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable;
DSP_GPIO_MODE_15_8 rGpioMode;
- PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n");
-
MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8);
rGpioMode.GpioMode10 = 0;
WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
@@ -88,54 +88,31 @@ static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
rGpioOutputData.Latch10 = 0;
rGpioOutputData.Mask10 = true;
WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
-
- PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
}
static irqreturn_t UartInterrupt(int irq, void *dev_id)
{
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
return IRQ_HANDLED;
}
static irqreturn_t DspInterrupt(int irq, void *dev_id)
{
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
+ struct dsp_3780i_config_settings *pSettings = &pDrvData->rBDData.rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
-
if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n",
- usIPCSource);
usIsolationMask = 1;
for (usPCNum = 1; usPCNum <= 16; usPCNum++) {
if (usIPCSource & usIsolationMask) {
usIPCSource &= ~usIsolationMask;
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n",
- usPCNum, usIPCSource);
if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) {
pDrvData->IPCs[usPCNum - 1].usIntCount = 1;
}
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt usIntCount %x\n",
- pDrvData->IPCs[usPCNum - 1].usIntCount);
if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, waking up usPCNum %x\n",
- usPCNum - 1);
wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue);
- } else {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, no one waiting for IPC %x\n",
- usPCNum - 1);
}
}
if (usIPCSource == 0)
@@ -143,56 +120,42 @@ static irqreturn_t DspInterrupt(int irq, void *dev_id)
/* try next IPC */
usIsolationMask = usIsolationMask << 1;
}
- } else {
- PRINTK_1(TRACE_TP3780I,
- "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n");
}
- PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n");
return IRQ_HANDLED;
}
-int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
+int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData)
{
int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
pBDData->bDSPEnabled = false;
pSettings->bInterruptClaimed = false;
retval = smapi_init();
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n");
+ pr_err("%s: Error: SMAPI is not available on this machine\n", __func__);
} else {
if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) {
retval = smapi_set_DSP_cfg();
}
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval);
-
return retval;
}
-void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData)
+void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData)
{
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
}
-int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData)
{
- SMAPI_DSP_SETTINGS rSmapiInfo;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData);
+ struct smapi_dsp_settings rSmapiInfo;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (smapi_query_DSP_cfg(&rSmapiInfo)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n");
+ pr_err("%s: Error: Could not query DSP config. Aborting.\n", __func__);
return -EIO;
}
@@ -203,7 +166,7 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
|| ( rSmapiInfo.usUartIRQ == 0 )
|| ( rSmapiInfo.usUartBaseIO == 0 )
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n");
+ pr_err("%s: Error: Illegal resource setting. Aborting.\n", __func__);
return -EIO;
}
@@ -225,41 +188,31 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0;
}
- PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n");
-
return 0;
}
-int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData)
{
int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
struct resource *pres;
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData);
-
pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i");
if ( pres == NULL ) retval = -EIO;
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO);
- retval = -EIO;
+ pr_err("%s: Error: Could not claim I/O region starting at %x\n", __func__,
+ pSettings->usDspBaseIO);
+ return -EIO;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval);
-
return retval;
}
-int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
release_region(pSettings->usDspBaseIO & (~3), 16);
@@ -268,28 +221,23 @@ int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
pSettings->bInterruptClaimed = false;
}
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData)
{
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
bool bDSPPoweredUp = false, bInterruptAllocated = false;
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
-
if (pBDData->bDSPEnabled) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n");
+ pr_err("%s: Error: DSP already enabled!\n", __func__);
goto exit_cleanup;
}
if (!pSettings->bDSPEnabled) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n");
+ pr_err("%s: Error: pSettings->bDSPEnabled not set\n", __func__);
goto exit_cleanup;
}
@@ -299,7 +247,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
|| (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF)
|| (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF)
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq);
+ pr_err("%s: Error: invalid irq %x\n", __func__, pSettings->usDspIrq);
goto exit_cleanup;
}
@@ -307,7 +255,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
((pSettings->usDspBaseIO & 0xF00F) != 0)
|| (pSettings->usDspBaseIO & 0x0FF0) == 0
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO);
+ pr_err("%s: Error: Invalid DSP base I/O address %x\n", __func__,
+ pSettings->usDspBaseIO);
goto exit_cleanup;
}
@@ -316,7 +265,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pSettings->usUartIrq >= s_numIrqs
|| s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq);
+ pr_err("%s: Error: Invalid UART IRQ %x\n", __func__, pSettings->usUartIrq);
goto exit_cleanup;
}
switch (pSettings->usUartBaseIO) {
@@ -327,7 +276,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
break;
default:
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO);
+ pr_err("%s: Error: Invalid UART base I/O address %x\n", __func__,
+ pSettings->usUartBaseIO);
goto exit_cleanup;
}
}
@@ -356,33 +306,30 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pSettings->usChipletEnable = TP_CFG_ChipletEnable;
if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
+ pr_err("%s: Error: Could not get UART IRQ %x\n", __func__, pSettings->usUartIrq);
goto exit_cleanup;
} else { /* no conflict just release */
free_irq(pSettings->usUartIrq, NULL);
}
if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
+ pr_err("%s: Error: Could not get 3780i IRQ %x\n", __func__, pSettings->usDspIrq);
goto exit_cleanup;
} else {
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
- pSettings->usDspIrq, pBDData->bShareDspIrq);
bInterruptAllocated = true;
pSettings->bInterruptClaimed = true;
}
smapi_set_DSP_power_state(false);
if (smapi_set_DSP_power_state(true)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n");
+ pr_err("%s: Error: smapi_set_DSP_power_state(true) failed\n", __func__);
goto exit_cleanup;
} else {
bDSPPoweredUp = true;
}
if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n");
+ pr_err("%s: Error: dsp3780I_EnableDSP() failed\n", __func__);
goto exit_cleanup;
}
@@ -390,12 +337,10 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pBDData->bDSPEnabled = true;
- PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
-
return 0;
exit_cleanup:
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n");
+ pr_err("%s: Cleaning up\n", __func__);
if (bDSPPoweredUp)
smapi_set_DSP_power_state(false);
if (bInterruptAllocated) {
@@ -406,12 +351,9 @@ exit_cleanup:
}
-int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (pBDData->bDSPEnabled) {
dsp3780I_DisableDSP(&pBDData->rDspSettings);
@@ -423,56 +365,38 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
pBDData->bDSPEnabled = false;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n",
- pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (dsp3780I_Reset(pSettings) == 0) {
EnableSRAM(pBDData);
- } else {
- retval = -EIO;
+ return 0;
}
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval);
-
- return retval;
+ return -EIO;
}
-int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (dsp3780I_Run(pSettings) == 0) {
// @BUG @TBD EnableSRAM(pBDData);
} else {
- retval = -EIO;
+ return -EIO;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities)
+int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities)
{
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
-
memset(pAbilities, 0, sizeof(*pAbilities));
/* fill out standard constant fields */
pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
@@ -497,25 +421,17 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME,
sizeof(TP_ABILITIES_BIOSTASK_NAME));
- PRINTK_1(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n");
-
return 0;
}
-int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
bool bRC = 0;
- PRINTK_6(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
- pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
-
if (pBDData->bDSPEnabled) {
switch (uOpcode) {
case IOCTL_MW_READ_DATA:
@@ -532,26 +448,18 @@ int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
}
}
- retval = (bRC) ? -EIO : 0;
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval);
-
- return retval;
+ return bRC ? -EIO : 0;
}
-int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
bool bRC = 0;
- PRINTK_6(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
- pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
-
if (pBDData->bDSPEnabled) {
switch (uOpcode) {
case IOCTL_MW_READ_INST:
@@ -564,11 +472,6 @@ int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
}
}
- retval = (bRC) ? -EIO : 0;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval);
-
- return retval;
+ return bRC ? -EIO : 0;
}
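
DspInterrupt() walks the pending-IPC word one isolation-mask bit at a time and wakes the matching wait queue; stripped of the trace calls it is a plain set-bit walk. The same dispatch could be spelled with for_each_set_bit(), sketched here with handle_ipc() as a hypothetical per-source handler:

#include <linux/bitops.h>

static void handle_ipc(unsigned int ipc);       /* hypothetical */

static void dispatch_ipcs(unsigned long pending)
{
        unsigned int bit;

        for_each_set_bit(bit, &pending, 16)     /* 16 IPC sources */
                handle_ipc(bit);
}
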
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
index 8bd976d42fae..c0001a344741 100644
--- a/drivers/char/mwave/tp3780i.h
+++ b/drivers/char/mwave/tp3780i.h
@@ -75,27 +75,27 @@
#define TP_CFG_PllBypass 0 /* don't bypass */
#define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */
-typedef struct {
+struct thinkpad_bd_data {
int bDSPEnabled;
int bShareDspIrq;
int bShareUartIrq;
- DSP_3780I_CONFIG_SETTINGS rDspSettings;
-} THINKPAD_BD_DATA;
+ struct dsp_3780i_config_settings rDspSettings;
+};
-int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData);
-int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
-void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData);
-int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData);
+int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities);
+void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData);
+int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
-int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index e9f694b36871..9eff426a9286 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -540,6 +540,7 @@ static void __exit nvram_module_exit(void)
module_init(nvram_module_init);
module_exit(nvram_module_exit);
+MODULE_DESCRIPTION("CMOS/NV-RAM driver for Linux");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
MODULE_ALIAS("devname:nvram");
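
This hunk and the nwbutton/nwflash/ppdev ones below add MODULE_DESCRIPTION() lines; modpost warns about modules that lack one in make W=1 builds. The boilerplate is simply:

#include <linux/module.h>

MODULE_DESCRIPTION("One-line summary of what the module does");
MODULE_LICENSE("GPL");
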
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index ea378c0ed549..92cee5717237 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -241,6 +241,7 @@ static void __exit nwbutton_exit (void)
MODULE_AUTHOR("Alex Holden");
+MODULE_DESCRIPTION("NetWinder button driver");
MODULE_LICENSE("GPL");
module_init(nwbutton_init);
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index 0973c2c2b01a..9f52f0306ef7 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -618,6 +618,7 @@ static void __exit nwflash_exit(void)
iounmap((void *)FLASH_BASE);
}
+MODULE_DESCRIPTION("NetWinder flash memory driver");
MODULE_LICENSE("GPL");
module_param(flashdebug, bool, 0644);
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index c39a836ebd15..5f4696813cea 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -235,7 +235,6 @@ static const struct file_operations pc8736x_gpio_fileops = {
.open = pc8736x_gpio_open,
.write = nsc_gpio_write,
.read = nsc_gpio_read,
- .llseek = no_llseek,
};
static void __init pc8736x_init_shadow(void)
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
index 3c99696b145e..53467b0a6187 100644
--- a/drivers/char/powernv-op-panel.c
+++ b/drivers/char/powernv-op-panel.c
@@ -195,12 +195,11 @@ free_oppanel_data:
return rc;
}
-static int oppanel_remove(struct platform_device *pdev)
+static void oppanel_remove(struct platform_device *pdev)
{
misc_deregister(&oppanel_dev);
kfree(oppanel_lines);
kfree(oppanel_data);
- return 0;
}
static const struct of_device_id oppanel_match[] = {
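
oppanel_remove() moves to the void-returning platform remove callback: the old int return was effectively ignored by the driver core, since the device goes away regardless of the value. Minimal shape of a driver on the new prototype, with illustrative names:

#include <linux/platform_device.h>

static void example_remove(struct platform_device *pdev)
{
        /* unwind probe; there is no error to report anymore */
}

static struct platform_driver example_driver = {
        .remove = example_remove,
        .driver = {
                .name = "example",
        },
};
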
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index ee951b265213..d1dfbd8d4d42 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp)
if (!port) {
pr_warn("%s: no associated port!\n", name);
rc = -ENXIO;
- goto err;
+ goto err_free_name;
}
index = ida_alloc(&ida_index, GFP_KERNEL);
+ if (index < 0) {
+ pr_warn("%s: failed to get index!\n", name);
+ rc = index;
+ goto err_put_port;
+ }
+
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
ppdev_cb.irq_func = pp_irq;
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
- parport_put_port(port);
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
rc = -ENXIO;
ida_free(&ida_index, index);
- goto err;
+ goto err_put_port;
}
pp->pdev = pdev;
pp->index = index;
dev_dbg(&pdev->dev, "registered pardevice\n");
-err:
+err_put_port:
+ parport_put_port(port);
+err_free_name:
kfree(name);
return rc;
}
@@ -779,7 +786,6 @@ static const struct class ppdev_class = {
static const struct file_operations pp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pp_read,
.write = pp_write,
.poll = pp_poll,
@@ -832,7 +838,6 @@ static struct parport_driver pp_driver = {
.probe = pp_probe,
.match_port = pp_attach,
.detach = pp_detach,
- .devmodel = true,
};
static int __init ppdev_init(void)
@@ -875,5 +880,6 @@ static void __exit ppdev_cleanup(void)
module_init(ppdev_init);
module_exit(ppdev_cleanup);
+MODULE_DESCRIPTION("Support for user-space parallel port device drivers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
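
The register_device() rework above is the standard goto-ladder unwind: the new ida_alloc() check and the relocated parport_put_port() each get a label that releases exactly what was acquired before the failure point. The shape of the idiom, with hypothetical step/undo helpers:

int step_a(void), step_b(void), step_c(void);   /* hypothetical */
void undo_a(void), undo_b(void);

static int example_setup(void)
{
        int rc;

        rc = step_a();
        if (rc)
                return rc;              /* nothing to unwind yet */
        rc = step_b();
        if (rc)
                goto err_undo_a;
        rc = step_c();
        if (rc)
                goto err_undo_b;
        return 0;

err_undo_b:
        undo_b();
err_undo_a:
        undo_a();
        return rc;
}
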
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2597cb43f438..bab03c7c4194 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
*
@@ -56,6 +56,11 @@
#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
+#ifdef CONFIG_VDSO_GETRANDOM
+#include <vdso/getrandom.h>
+#include <vdso/datapage.h>
+#include <vdso/vsyscall.h>
+#endif
#include <asm/archrandom.h>
#include <asm/processor.h>
#include <asm/irq.h>
@@ -254,8 +259,8 @@ static void crng_reseed(struct work_struct *work)
u8 key[CHACHA_KEY_SIZE];
/* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
- if (likely(system_unbound_wq))
- queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+ if (likely(system_dfl_wq))
+ queue_delayed_work(system_dfl_wq, &next_reseed, crng_reseed_interval());
extract_entropy(key, sizeof(key));
@@ -271,6 +276,22 @@ static void crng_reseed(struct work_struct *work)
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
+#ifdef CONFIG_VDSO_GETRANDOM
+ /* base_crng.generation's invalid value is ULONG_MAX, while
+ * vdso_k_rng_data->generation's invalid value is 0, so add one to the
+ * former to arrive at the latter. Use smp_store_release so that this
+ * is ordered with the write above to base_crng.generation. Pairs with
+ * the smp_rmb() before the syscall in the vDSO code.
+ *
+ * Cast to unsigned long for 32-bit architectures, since atomic 64-bit
+ * operations are not supported on those architectures. This is safe
+ * because base_crng.generation is a 32-bit value. On big-endian
+ * architectures it will be stored in the upper 32 bits, but that's okay
+ * because the vDSO side only checks whether the value changed, without
+ * actually using or interpreting the value.
+ */
+ smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
+#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
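
The CONFIG_VDSO_GETRANDOM hunk publishes the new generation with release semantics so the vDSO side cannot observe the bumped counter before the reseeded key material. A condensed sketch of that publish/observe pairing; struct shared_rng and both function names are illustrative:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct shared_rng {
        unsigned long generation;       /* 0 means "not ready" */
};

/* writer: runs after the new key material is in place */
static void publish_generation(struct shared_rng *s, unsigned long next)
{
        smp_store_release(&s->generation, next);  /* orders key writes first */
}

/* reader: re-checks the counter before trusting cached output */
static unsigned long observe_generation(struct shared_rng *s)
{
        unsigned long gen = READ_ONCE(s->generation);

        smp_rmb();      /* pairs with the release above, cf. the hunk's comment */
        return gen;
}
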
@@ -288,11 +309,11 @@ static void crng_reseed(struct work_struct *work)
* key value, at index 4, so the state should always be zeroed out
* immediately after using in order to maintain forward secrecy.
* If the state cannot be erased in a timely manner, then it is
- * safer to set the random_data parameter to &chacha_state[4] so
- * that this function overwrites it before returning.
+ * safer to set the random_data parameter to &chacha_state->x[4]
+ * so that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
- u32 chacha_state[CHACHA_STATE_WORDS],
+ struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
u8 first_block[CHACHA_BLOCK_SIZE];
@@ -300,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
BUG_ON(random_data_len > 32);
chacha_init_consts(chacha_state);
- memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
- memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
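
crng_fast_key_erasure() is the textbook fast-key-erasure construction: every generated block first overwrites the key that produced it, so compromising the current key reveals nothing about past output. A toy userspace rendering of the idea; block_fn() stands in for a stream-cipher block such as chacha20_block(), and the caller must keep len <= BLOCK_LEN - KEY_LEN:

#include <string.h>

#define KEY_LEN 32
#define BLOCK_LEN 64

/* hypothetical stream-cipher block function */
void block_fn(const unsigned char key[KEY_LEN], unsigned char out[BLOCK_LEN]);

void fast_key_erasure(unsigned char key[KEY_LEN],
                      unsigned char *out, size_t len)
{
        unsigned char block[BLOCK_LEN];

        block_fn(key, block);
        memcpy(key, block, KEY_LEN);            /* ratchet: a key never survives use */
        memcpy(out, block + KEY_LEN, len);      /* hand out the remaining bytes */
        memset(block, 0, sizeof(block));        /* erase the stack copy */
}
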
@@ -314,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
*/
-static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+static void crng_make_state(struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
unsigned long flags;
@@ -374,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
static void _get_random_bytes(void *buf, size_t len)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 tmp[CHACHA_BLOCK_SIZE];
size_t first_block_len;
@@ -382,31 +403,31 @@ static void _get_random_bytes(void *buf, size_t len)
return;
first_block_len = min_t(size_t, 32, len);
- crng_make_state(chacha_state, buf, first_block_len);
+ crng_make_state(&chacha_state, buf, first_block_len);
len -= first_block_len;
buf += first_block_len;
while (len) {
if (len < CHACHA_BLOCK_SIZE) {
- chacha20_block(chacha_state, tmp);
+ chacha20_block(&chacha_state, tmp);
memcpy(buf, tmp, len);
memzero_explicit(tmp, sizeof(tmp));
break;
}
- chacha20_block(chacha_state, buf);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, buf);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
len -= CHACHA_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
}
/*
* This returns random bytes in arbitrary quantities. The quality of the
- * random bytes is good as /dev/urandom. In order to ensure that the
+ * random bytes is as good as /dev/urandom. In order to ensure that the
* randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
@@ -420,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 block[CHACHA_BLOCK_SIZE];
size_t ret = 0, copied;
@@ -432,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
* bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
- crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
+ CHACHA_KEY_SIZE);
/*
* However, if we're doing a read of len <= 32, we don't need to
* use chacha_state after, so we can simply return those bytes to
* the user directly.
*/
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
- ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
- chacha20_block(chacha_state, block);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, block);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
copied = copy_to_iter(block, sizeof(block), iter);
ret += copied;
@@ -463,13 +485,13 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
memzero_explicit(block, sizeof(block));
out_zero_chacha:
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
return ret ? ret : -EFAULT;
}
/*
* Batched entropy returns random integers. The quality of the random
- * number is good as /dev/urandom. In order to ensure that the randomness
+ * number is as good as /dev/urandom. In order to ensure that the randomness
* provided by this function is okay, the function wait_for_random_bytes()
* should be called and return 0 at least once at any point prior.
*/
@@ -614,7 +636,7 @@ enum {
};
static struct {
- struct blake2s_state hash;
+ struct blake2s_ctx hash;
spinlock_t lock;
unsigned int init_bits;
} input_pool = {
@@ -679,7 +701,7 @@ static void extract_entropy(void *buf, size_t len)
/* next_key = HASHPRF(seed, RDSEED || 0) */
block.counter = 0;
- blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key));
blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -689,7 +711,7 @@ static void extract_entropy(void *buf, size_t len)
i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
/* output = HASHPRF(seed, RDSEED || ++counter) */
++block.counter;
- blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i);
len -= i;
buf += i;
}
@@ -705,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits)
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
+ int m;
if (!bits)
return;
@@ -718,15 +741,18 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized && system_unbound_wq)
- queue_work(system_unbound_wq, &set_ready);
+ if (system_dfl_wq)
+ queue_work(system_dfl_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+#ifdef CONFIG_VDSO_GETRANDOM
+ WRITE_ONCE(vdso_k_rng_data->is_ready, true);
+#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (urandom_warning.missed)
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
+ m = ratelimit_state_get_miss(&urandom_warning);
+ if (m)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
@@ -768,7 +794,7 @@ static void __cold _credit_init_bits(size_t bits)
*
* add_bootloader_randomness() is called by bootloader drivers, such as EFI
* and device tree, and credits its input depending on whether or not the
- * command line option 'random.trust_bootloader'.
+ * command line option 'random.trust_bootloader' is set.
*
* add_vmfork_randomness() adds a unique (but not necessarily secret) ID
* representing the current instance of a VM to the pool, without crediting,
@@ -889,9 +915,8 @@ void __init random_init(void)
add_latent_entropy();
/*
- * If we were initialized by the cpu or bootloader before jump labels
- * or workqueues are initialized, then we should enable the static
- * branch here, where it's guaranteed that these have been initialized.
+ * If we were initialized by the cpu or bootloader before workqueues
+ * are initialized, then we should enable the static branch here.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
@@ -1270,6 +1295,7 @@ static void __cold try_to_generate_entropy(void)
struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
+ cpumask_var_t timer_cpus;
int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1284,13 +1310,15 @@ static void __cold try_to_generate_entropy(void)
atomic_set(&stack->samples, 0);
timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+ if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+ goto out;
+
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
- * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ * executing by checking timer_delete_sync_try(), before queueing the next one.
*/
- if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
- struct cpumask timer_cpus;
+ if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
unsigned int num_cpus;
/*
@@ -1300,19 +1328,19 @@ static void __cold try_to_generate_entropy(void)
preempt_disable();
/* Only schedule callbacks on timer CPUs that are online. */
- cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
- num_cpus = cpumask_weight(&timer_cpus);
+ cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ num_cpus = cpumask_weight(timer_cpus);
/* In very bizarre case of misconfiguration, fallback to all online. */
if (unlikely(num_cpus == 0)) {
- timer_cpus = *cpu_online_mask;
- num_cpus = cpumask_weight(&timer_cpus);
+			cpumask_copy(timer_cpus, cpu_online_mask);
+ num_cpus = cpumask_weight(timer_cpus);
}
/* Basic CPU round-robin, which avoids the current CPU. */
do {
- cpu = cpumask_next(cpu, &timer_cpus);
+ cpu = cpumask_next(cpu, timer_cpus);
if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(&timer_cpus);
+ cpu = cpumask_first(timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
/* Expiring the timer at `jiffies` means it's the next tick. */
@@ -1328,8 +1356,10 @@ static void __cold try_to_generate_entropy(void)
}
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
- del_timer_sync(&stack->timer);
- destroy_timer_on_stack(&stack->timer);
+ free_cpumask_var(timer_cpus);
+out:
+ timer_delete_sync(&stack->timer);
+ timer_destroy_on_stack(&stack->timer);
}
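
The off-stack cpumask pattern introduced above, restated as a minimal
sketch with hypothetical naming (when CONFIG_CPUMASK_OFFSTACK=n,
alloc_cpumask_var() degenerates to a stack mask and cannot fail):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Pick the next online CPU after @prev, round-robin. */
    static int example_pick_next_cpu(int prev)
    {
            cpumask_var_t mask;
            int cpu;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(mask, cpu_online_mask);
            cpu = cpumask_next(prev, mask);
            if (cpu >= nr_cpu_ids)
                    cpu = cpumask_first(mask);

            free_cpumask_var(mask);
            return cpu;
    }
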
@@ -1442,7 +1472,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
- ++urandom_warning.missed;
+ ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
@@ -1604,7 +1634,7 @@ static u8 sysctl_bootid[UUID_SIZE];
* UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
*/
-static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos)
{
u8 tmp_uuid[UUID_SIZE], *uuid;
@@ -1635,13 +1665,13 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
}
/* The same as proc_dointvec, but writes don't change anything. */
-static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos)
{
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static struct ctl_table random_table[] = {
+static const struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 9f701dcba95c..700e6affea6f 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -68,7 +68,6 @@ static const struct file_operations scx200_gpio_fileops = {
.read = nsc_gpio_read,
.open = scx200_gpio_open,
.release = scx200_gpio_release,
- .llseek = no_llseek,
};
static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 22d249333f53..677bb5ac950a 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -37,6 +37,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -1054,7 +1055,6 @@ static const struct file_operations sonypi_misc_fops = {
.release = sonypi_misc_release,
.fasync = sonypi_misc_fasync,
.unlocked_ioctl = sonypi_misc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice sonypi_misc_device = {
@@ -1269,12 +1269,12 @@ static void sonypi_display_info(void)
"compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
sonypi_device.model,
verbose,
- fnkeyinit ? "on" : "off",
- camera ? "on" : "off",
- compat ? "on" : "off",
+ str_on_off(fnkeyinit),
+ str_on_off(camera),
+ str_on_off(compat),
mask,
- useinput ? "on" : "off",
- SONYPI_ACPI_ACTIVE ? "on" : "off");
+ str_on_off(useinput),
+ str_on_off(SONYPI_ACPI_ACTIVE));
printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
sonypi_device.irq,
sonypi_device.ioport1, sonypi_device.ioport2);
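
str_on_off() from <linux/string_choices.h> is a trivial wrapper, so the
conversion above is behavior-preserving; side by side:

    #include <linux/printk.h>
    #include <linux/string_choices.h>

    static void example_report(bool camera)
    {
            /* Open-coded form replaced by the patch: */
            pr_info("camera = %s\n", camera ? "on" : "off");
            /* Equivalent helper form: */
            pr_info("camera = %s\n", str_on_off(camera));
    }
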
@@ -1408,7 +1408,7 @@ static int sonypi_probe(struct platform_device *dev)
return error;
}
-static int sonypi_remove(struct platform_device *dev)
+static void sonypi_remove(struct platform_device *dev)
{
sonypi_disable();
@@ -1432,8 +1432,6 @@ static int sonypi_remove(struct platform_device *dev)
}
kfifo_free(&sonypi_device.fifo);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 896a3550fba9..b381ea7e85d2 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -42,11 +42,12 @@
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <asm/io.h> /* inb/outb */
#include <linux/uaccess.h>
MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
+MODULE_DESCRIPTION("Telecom Clock driver for Intel NetStructure(tm) MPCBL0010");
MODULE_LICENSE("GPL");
/*Hardware Reset of the PLL */
@@ -741,7 +742,7 @@ static ssize_t store_reset (struct device *d,
static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
-static struct attribute *tlclk_sysfs_entries[] = {
+static struct attribute *tlclk_attrs[] = {
&dev_attr_current_ref.attr,
&dev_attr_telclock_version.attr,
&dev_attr_alarms.attr,
@@ -765,13 +766,9 @@ static struct attribute *tlclk_sysfs_entries[] = {
&dev_attr_reset.attr,
NULL
};
+ATTRIBUTE_GROUPS(tlclk);
-static const struct attribute_group tlclk_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = tlclk_sysfs_entries,
-};
-
-static struct platform_device *tlclk_device;
+static struct faux_device *tlclk_device;
static int __init tlclk_init(void)
{
@@ -816,24 +813,13 @@ static int __init tlclk_init(void)
goto out3;
}
- tlclk_device = platform_device_register_simple("telco_clock",
- -1, NULL, 0);
- if (IS_ERR(tlclk_device)) {
- printk(KERN_ERR "tlclk: platform_device_register failed.\n");
- ret = PTR_ERR(tlclk_device);
+ tlclk_device = faux_device_create_with_groups("telco_clock", NULL, NULL, tlclk_groups);
+ if (!tlclk_device) {
+ ret = -ENODEV;
goto out4;
}
- ret = sysfs_create_group(&tlclk_device->dev.kobj,
- &tlclk_attribute_group);
- if (ret) {
- printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
- goto out5;
- }
-
return 0;
-out5:
- platform_device_unregister(tlclk_device);
out4:
misc_deregister(&tlclk_miscdev);
out3:
@@ -847,13 +833,12 @@ out1:
static void __exit tlclk_cleanup(void)
{
- sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
- platform_device_unregister(tlclk_device);
+ faux_device_destroy(tlclk_device);
misc_deregister(&tlclk_miscdev);
unregister_chrdev(tlclk_major, "telco_clock");
release_region(TLCLK_BASE, 8);
- del_timer_sync(&switchover_timer);
+ timer_delete_sync(&switchover_timer);
kfree(alarm_events);
}
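
The faux bus removes the platform-device and manual-sysfs boilerplate for
devices that exist only to carry attributes; a minimal sketch of the same
pattern (names hypothetical):

    #include <linux/device/faux.h>
    #include <linux/init.h>
    #include <linux/sysfs.h>

    static struct attribute *example_attrs[] = {
            /* &dev_attr_foo.attr, ... */
            NULL
    };
    ATTRIBUTE_GROUPS(example);

    static struct faux_device *example_fdev;

    static int __init example_init(void)
    {
            /* No parent and no ops are needed for plain sysfs use. */
            example_fdev = faux_device_create_with_groups("example", NULL,
                                                          NULL,
                                                          example_groups);
            return example_fdev ? 0 : -ENODEV;
    }

    static void __exit example_exit(void)
    {
            faux_device_destroy(example_fdev);
    }
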
@@ -871,7 +856,7 @@ static void switchover_timeout(struct timer_list *unused)
}
/* Alarm processing is done, wake up read task */
- del_timer(&switchover_timer);
+ timer_delete(&switchover_timer);
got_event = 1;
wake_up(&wq);
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index e63a6a17793c..8a8f692b6088 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,10 +29,11 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default y
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Setting this causes us to deploy a scheme which uses request
and response HMACs in addition to encryption for
@@ -162,7 +163,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
- depends on PPC64 || HAS_IOPORT_MAP
+ depends on HAS_IOPORT_MAP
depends on HAS_IOPORT
help
If you have a TPM security chip from Atmel say Yes and it
@@ -189,6 +190,15 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_LOONGSON
+ tristate "Loongson TPM Interface"
+ depends on MFD_LOONGSON_SE
+ help
+ If you want to make Loongson TPM support available, say Yes and
+ it will be accessible from within Linux. To compile this
+ driver as a module, choose M here; the module will be called
+ tpm_loongson.
+
config TCG_XEN
tristate "XEN TPM Interface"
depends on TCG_TPM && XEN
@@ -210,6 +220,15 @@ config TCG_CRB
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_crb.
+config TCG_ARM_CRB_FFA
+ tristate "TPM CRB over Arm FF-A Transport"
+ depends on ARM_FFA_TRANSPORT && TCG_CRB
+ default TCG_CRB
+ help
+ If the Arm FF-A transport is used to access the TPM say Yes.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_crb_ffa.
+
config TCG_VTPM_PROXY
tristate "VTPM Proxy Interface"
depends on TCG_TPM
@@ -225,5 +244,15 @@ config TCG_FTPM_TEE
help
This driver proxies for firmware TPM running in TEE.
+config TCG_SVSM
+ tristate "SNP SVSM vTPM interface"
+ depends on AMD_MEM_ENCRYPT
+ help
+ This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest
+ OS can use to discover and talk to a vTPM emulated by the Secure VM
+ Service Module (SVSM) in the guest context, but at a more privileged
+ level (usually VMPL0). To compile this driver as a module, choose M
+ here; the module will be called tpm_svsm.
+
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 4c695b0388f3..5b5cdc0d32e4 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -16,8 +16,8 @@ tpm-y += eventlog/common.o
tpm-y += eventlog/tpm1.o
tpm-y += eventlog/tpm2.o
tpm-y += tpm-buf.o
+tpm-y += tpm2-sessions.o
-tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
@@ -42,5 +42,8 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
+obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
+obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 69533d0bfb51..cf02ec646f46 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- devm_kfree(&chip->dev, log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 639c3f395a5a..691813d2a5a2 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -32,7 +32,7 @@ static int tpm_bios_measurements_open(struct inode *inode,
struct tpm_chip *chip;
inode_lock(inode);
- if (!inode->i_private) {
+ if (!inode->i_nlink) {
inode_unlock(inode);
return -ENODEV;
}
@@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode,
if (!err) {
seq = file->private_data;
seq->private = chip;
+ } else {
+ put_device(&chip->dev);
}
return err;
@@ -103,7 +105,7 @@ static int tpm_read_log(struct tpm_chip *chip)
void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
- unsigned int cnt;
+ struct dentry *dentry;
int log_version;
int rc = 0;
@@ -115,14 +117,12 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
return;
log_version = rc;
- cnt = 0;
- chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ chip->bios_dir = securityfs_create_dir(name, NULL);
/* NOTE: securityfs_create_dir can return ENODEV if securityfs is
* compiled out. The caller should ignore the ENODEV return code.
*/
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
+ if (IS_ERR(chip->bios_dir))
+ return;
chip->bin_log_seqops.chip = chip;
if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
@@ -133,14 +133,13 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
&tpm1_binary_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("binary_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->bin_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
@@ -148,42 +147,23 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
chip->ascii_log_seqops.seqops =
&tpm1_ascii_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->ascii_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
}
return;
err:
- chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)
{
- int i;
- struct inode *inode;
-
- /* securityfs_remove currently doesn't take care of handling sync
- * between removal and opening of pseudo files. To handle this, a
- * workaround is added by making i_private = NULL here during removal
- * and to check it during open(), both within inode_lock()/unlock().
- * This design ensures that open() either safely gets kref or fails.
- */
- for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
- if (chip->bios_dir[i]) {
- inode = d_inode(chip->bios_dir[i]);
- inode_lock(inode);
- inode->i_private = NULL;
- inode_unlock(inode);
- securityfs_remove(chip->bios_dir[i]);
- }
- }
+ securityfs_remove(chip->bios_dir);
}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index 930fe43d5daf..92cec9722ee4 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -24,16 +24,10 @@
static int tpm_read_log_memory_region(struct tpm_chip *chip)
{
- struct device_node *node;
struct resource res;
int rc;
- node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
- if (!node)
- return -ENODEV;
-
- rc = of_address_to_resource(node, 0, &res);
- of_node_put(node);
+ rc = of_reserved_mem_region_to_resource(chip->dev.parent->of_node, 0, &res);
if (rc)
return rc;
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 12ee42a31c71..e7913b2853d5 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
(unsigned char *)(v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
- if (!eventname) {
- printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
- __func__);
- return -EFAULT;
- }
+ if (!eventname)
+ return -ENOMEM;
/* 1st: PCR */
seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 45ca33b3dcb2..81348487c125 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -133,7 +133,7 @@ static void st33zp24_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id st33zp24_i2c_id[] = {
- {TPM_ST33_I2C, 0},
+ { TPM_ST33_I2C },
{}
};
MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index c0771980bc2f..2ed7815e4899 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -300,7 +300,7 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
* send TPM commands through the I2C bus.
*/
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
- size_t len)
+ size_t bufsiz, size_t len)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u32 status, i, size, ordinal;
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index 647c6ca92ac3..dc882fc9fa9e 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -147,6 +147,26 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
/**
+ * tpm_buf_append_handle() - Add a handle
+ * @chip: &tpm_chip instance
+ * @buf: &tpm_buf instance
+ * @handle: a TPM object handle
+ *
+ * Add a handle to the buffer, and increase the count tracking the number of
+ * handles in the command buffer. Works only for command buffers.
+ */
+void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle)
+{
+ if (buf->flags & TPM_BUF_TPM2B) {
+ dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n");
+ return;
+ }
+
+ tpm_buf_append_u32(buf, handle);
+ buf->handles++;
+}
+
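
A hedged usage sketch for the new helper (command choice illustrative; it
mirrors the no-integrity path of tpm2_pcr_extend() further down):

    #include <linux/tpm.h>

    static int example_build(struct tpm_chip *chip, u32 pcr_idx)
    {
            struct tpm_buf buf;
            int rc;

            rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
            if (rc)
                    return rc;

            /* Appends the handle and bumps buf.handles. */
            tpm_buf_append_handle(chip, &buf, pcr_idx);

            /* ... append the authorization area and parameters here ... */

            tpm_buf_destroy(&buf);
            return 0;
    }
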
+/**
* tpm_buf_read() - Read from a TPM buffer
* @buf: &tpm_buf instance
* @offset: offset within the buffer
@@ -181,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void
*/
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
{
- u8 value;
+ u8 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -198,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
*/
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
{
- u16 value;
+ u16 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -215,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
*/
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
{
- u32 value;
+ u32 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -223,30 +243,4 @@ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
}
EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
-static u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-/**
- * tpm_buf_parameters - return the TPM response parameters area of the tpm_buf
- * @buf: tpm_buf to use
- *
- * Where the parameters are located depends on the tag of a TPM
- * command (it's immediately after the header for TPM_ST_NO_SESSIONS
- * or 4 bytes after for TPM_ST_SESSIONS). Evaluate this and return a
- * pointer to the first byte of the parameters area.
- *
- * @return: pointer to parameters area
- */
-u8 *tpm_buf_parameters(struct tpm_buf *buf)
-{
- int offset = TPM_HEADER_SIZE;
-
- if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
- offset += 4;
-
- return &buf->data[offset];
-}
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 854546000c92..082b910ddf0d 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -168,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip)
goto out_ops;
mutex_lock(&chip->tpm_mutex);
+
+	/* tpm_chip_start may issue IO that is denied while suspended */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ goto out_lock;
+
rc = tpm_chip_start(chip);
if (rc)
goto out_lock;
@@ -226,42 +231,6 @@ struct tpm_chip *tpm_default_chip(void)
EXPORT_SYMBOL_GPL(tpm_default_chip);
/**
- * tpm_find_get_ops() - find and reserve a TPM chip
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- *
- * Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_put_ops() after use.
- * This function is for internal use only. It supports existing TPM callers
- * by accepting NULL, but those callers should be converted to pass in a chip
- * directly.
- *
- * Return:
- * A reserved &struct tpm_chip instance.
- * %NULL if a chip is not found.
- * %NULL if the chip is not available.
- */
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
-{
- int rc;
-
- if (chip) {
- if (!tpm_try_get_ops(chip))
- return chip;
- return NULL;
- }
-
- chip = tpm_default_chip();
- if (!chip)
- return NULL;
- rc = tpm_try_get_ops(chip);
- /* release additional reference we got from tpm_default_chip() */
- put_device(&chip->dev);
- if (rc)
- return NULL;
- return chip;
-}
-
-/**
* tpm_dev_release() - free chip memory and the device number
* @dev: the character device for the TPM chip
*
@@ -277,7 +246,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
- kfree(chip->allocated_banks);
#ifdef CONFIG_TCG_TPM2_HMAC
kfree(chip->auth);
#endif
@@ -300,6 +268,7 @@ int tpm_class_shutdown(struct device *dev)
down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
if (!tpm_chip_start(chip)) {
+ tpm2_end_auth_session(chip);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_stop(chip);
}
@@ -525,10 +494,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
- return 0;
-
return tpm_get_random(chip, data, max);
}
@@ -674,6 +639,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ int rc;
+
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
+ tpm2_end_auth_session(chip);
+ tpm_put_ops(chip);
+ }
+#endif
+
tpm_del_legacy_sysfs(chip);
if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 30b4c288c1bb..f942c0c8e402 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
struct tpm_header *header = (void *)buf;
ssize_t ret, len;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_end_auth_session(chip);
+
ret = tpm2_prepare_space(chip, space, buf, bufsiz);
/* If the command is not implemented by the TPM, synthesize a
* response with a TPM2_RC_COMMAND_CODE return for user-space.
@@ -47,6 +50,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret)
ret = tpm2_commit_space(chip, space, buf, &len);
+ else
+ tpm2_flush_space(chip);
out_rc:
return ret ? ret : len;
@@ -86,7 +91,7 @@ out:
static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = from_timer(priv, t, user_read_timer);
+ struct file_priv *priv = timer_container_of(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
@@ -155,7 +160,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
out:
if (!priv->response_length) {
*off = 0;
- del_timer_sync(&priv->user_read_timer);
+ timer_delete_sync(&priv->user_read_timer);
flush_work(&priv->timeout_work);
}
mutex_unlock(&priv->buffer_mutex);
@@ -262,7 +267,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
void tpm_common_release(struct file *file, struct file_priv *priv)
{
flush_work(&priv->async_work);
- del_timer_sync(&priv->user_read_timer);
+ timer_delete_sync(&priv->user_read_timer);
flush_work(&priv->timeout_work);
file->private_data = NULL;
priv->response_length = 0;
@@ -270,7 +275,8 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
int __init tpm_dev_common_init(void)
{
- tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
return !tpm_dev_wq ? -ENOMEM : 0;
}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index e2c0baa69fef..97c94b5e9340 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -59,7 +59,6 @@ static int tpm_release(struct inode *inode, struct file *file)
const struct file_operations tpm_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = tpm_open,
.read = tpm_common_read,
.write = tpm_common_write,
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 5da134f12c9a..f745a098908b 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -52,12 +52,43 @@ MODULE_PARM_DESC(suspend_pcr,
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_calc_ordinal_duration(chip, ordinal);
+ return tpm2_calc_ordinal_duration(ordinal);
else
return tpm1_calc_ordinal_duration(chip, ordinal);
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+static void tpm_chip_cancel(struct tpm_chip *chip)
+{
+ if (!chip->ops->cancel)
+ return;
+
+ chip->ops->cancel(chip);
+}
+
+static u8 tpm_chip_status(struct tpm_chip *chip)
+{
+ if (!chip->ops->status)
+ return 0;
+
+ return chip->ops->status(chip);
+}
+
+static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ if (!chip->ops->req_canceled)
+ return false;
+
+ return chip->ops->req_canceled(chip, status);
+}
+
+static bool tpm_transmit_completed(u8 status, struct tpm_chip *chip)
+{
+ u8 status_masked = status & chip->ops->req_complete_mask;
+
+ return status_masked == chip->ops->req_complete_val;
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
struct tpm_header *header = buf;
@@ -82,7 +113,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return -E2BIG;
}
- rc = chip->ops->send(chip, buf, count);
+ rc = chip->ops->send(chip, buf, bufsiz, count);
if (rc < 0) {
if (rc != -EPIPE)
dev_err(&chip->dev,
@@ -90,8 +121,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return rc;
}
- /* A sanity check. send() should just return zero on success e.g.
- * not the command length.
+ /*
+ * Synchronous devices return the response directly during the send()
+ * call in the same buffer.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_SYNC) {
+ len = rc;
+ rc = 0;
+ goto out_sync;
+ }
+
+ /*
+ * A sanity check. send() of asynchronous devices should just return
+	 * zero on success, i.e. not the command length.
*/
if (rc > 0) {
dev_warn(&chip->dev,
@@ -104,12 +146,11 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
- u8 status = chip->ops->status(chip);
- if ((status & chip->ops->req_complete_mask) ==
- chip->ops->req_complete_val)
+ u8 status = tpm_chip_status(chip);
+ if (tpm_transmit_completed(status, chip))
goto out_recv;
- if (chip->ops->req_canceled(chip, status)) {
+ if (tpm_chip_req_canceled(chip, status)) {
dev_err(&chip->dev, "Operation Canceled\n");
return -ECANCELED;
}
@@ -118,7 +159,14 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
rmb();
} while (time_before(jiffies, stop));
- chip->ops->cancel(chip);
+ /*
+ * Check for completion one more time, just in case the device reported
+ * it while the driver was sleeping in the busy loop above.
+ */
+ if (tpm_transmit_completed(tpm_chip_status(chip), chip))
+ goto out_recv;
+
+ tpm_chip_cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
return -ETIME;
@@ -127,7 +175,10 @@ out_recv:
if (len < 0) {
rc = len;
dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
- } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
+ return rc;
+ }
+out_sync:
+ if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
rc = -EFAULT;
return rc ? rc : len;
@@ -262,10 +313,13 @@ int tpm_is_tpm2(struct tpm_chip *chip)
{
int rc;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
tpm_put_ops(chip);
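
Each converted entry point below follows the same bracketing discipline
that replaces tpm_find_get_ops(); distilled into a sketch (helper name
hypothetical):

    static int example_with_ops(struct tpm_chip *chip)
    {
            int rc;

            if (!chip)
                    return -ENODEV;

            /* Reserves chip->ops and starts the chip; fails while suspended. */
            rc = tpm_try_get_ops(chip);
            if (rc)
                    return rc;

            /* ... issue commands against chip ... */

            tpm_put_ops(chip);
            return 0;
    }
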
@@ -287,10 +341,13 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
{
int rc;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
else
@@ -318,10 +375,13 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
for (i = 0; i < chip->nr_allocated_banks; i++) {
if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
rc = -EINVAL;
@@ -370,6 +430,13 @@ int tpm_pm_suspend(struct device *dev)
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc) {
+		/* Can be safely set out of locks, as no action can race: */
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
goto suspended;
@@ -377,19 +444,19 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- rc = tpm_try_get_ops(chip);
- if (!rc) {
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_STATE);
- else
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
-
- tpm_put_ops(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ tpm2_end_auth_session(chip);
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ goto suspended;
}
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
suspended:
chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ tpm_put_ops(chip);
+out:
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
@@ -434,10 +501,13 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!out || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 6b8b9956ba69..02c07fef41ba 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -28,7 +28,7 @@
#include <linux/tpm_eventlog.h>
#ifdef CONFIG_X86
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
#define TPM_MINOR 224 /* officially assigned */
@@ -267,7 +267,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
int tpm_chip_bootstrap(struct tpm_chip *chip);
int tpm_chip_start(struct tpm_chip *chip);
void tpm_chip_stop(struct tpm_chip *chip);
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
struct tpm_chip *tpm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
@@ -299,7 +298,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index cf64c7385105..b49a790f1bd5 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
*/
int tpm1_get_pcr_allocation(struct tpm_chip *chip)
{
- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks)
- return -ENOMEM;
-
chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 0cdf892ec2a7..3a77be7ebf4a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -11,10 +11,17 @@
* used by the kernel internally.
*/
+#include <linux/dev_printk.h>
+#include <linux/tpm.h>
#include "tpm.h"
#include <crypto/hash_info.h>
+#include <linux/unaligned.h>
-static struct tpm2_hash tpm2_hash_map[] = {
+static bool disable_pcr_integrity;
+module_param(disable_pcr_integrity, bool, 0444);
+MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend");
+
+struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
{HASH_ALGO_SHA384, TPM_ALG_SHA384},
@@ -22,122 +29,71 @@ static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
};
+int tpm2_find_hash_alg(unsigned int crypto_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++)
+ if (crypto_id == tpm2_hash_map[i].crypto_id)
+ return tpm2_hash_map[i].tpm_id;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tpm2_find_hash_alg);
+
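
A hedged example of the new lookup (the mapping is exactly the table
above; HASH_ALGO_* comes from <crypto/hash_info.h>):

    /* Translate a crypto-subsystem hash ID to a TPM2 algorithm ID. */
    static u16 example_bank_alg(void)
    {
            int alg_id = tpm2_find_hash_alg(HASH_ALGO_SHA256);

            return alg_id < 0 ? TPM_ALG_ERROR : alg_id;
    }
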
int tpm2_get_timeouts(struct tpm_chip *chip)
{
- /* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
-
- /* PTP spec timeouts */
- chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
-
- /* Key creation commands long timeouts */
- chip->duration[TPM_LONG_LONG] =
- msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
-
chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
-
return 0;
}
-/**
- * tpm2_ordinal_duration_index() - returns an index to the chip duration table
- * @ordinal: TPM command ordinal.
- *
- * The function returns an index to the chip duration table
- * (enum tpm_duration), that describes the maximum amount of
- * time the chip could take to return the result for a particular ordinal.
- *
- * The values of the MEDIUM, and LONG durations are taken
- * from the PC Client Profile (PTP) specification (750, 2000 msec)
- *
- * LONG_LONG is for commands that generates keys which empirically takes
- * a longer time on some systems.
- *
- * Return:
- * * TPM_MEDIUM
- * * TPM_LONG
- * * TPM_LONG_LONG
- * * TPM_UNDEFINED
+/*
+ * Contains the maximum durations in milliseconds for TPM2 commands.
*/
-static u8 tpm2_ordinal_duration_index(u32 ordinal)
-{
- switch (ordinal) {
- /* Startup */
- case TPM2_CC_STARTUP: /* 144 */
- return TPM_MEDIUM;
-
- case TPM2_CC_SELF_TEST: /* 143 */
- return TPM_LONG;
-
- case TPM2_CC_GET_RANDOM: /* 17B */
- return TPM_LONG;
-
- case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
- return TPM_MEDIUM;
- case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
- return TPM_MEDIUM;
- case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
- return TPM_MEDIUM;
- case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
- return TPM_MEDIUM;
-
- case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG_LONG;
-
- case TPM2_CC_PCR_EXTEND: /* 182 */
- return TPM_MEDIUM;
-
- case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
- return TPM_LONG;
- case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
- return TPM_LONG;
-
- case TPM2_CC_GET_CAPABILITY: /* 17A */
- return TPM_MEDIUM;
-
- case TPM2_CC_NV_READ: /* 14E */
- return TPM_LONG;
-
- case TPM2_CC_CREATE_PRIMARY: /* 131 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE: /* 153 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE_LOADED: /* 191 */
- return TPM_LONG_LONG;
-
- default:
- return TPM_UNDEFINED;
- }
-}
+static const struct {
+ unsigned long ordinal;
+ unsigned long duration;
+} tpm2_ordinal_duration_map[] = {
+ {TPM2_CC_STARTUP, 750},
+ {TPM2_CC_SELF_TEST, 3000},
+ {TPM2_CC_GET_RANDOM, 2000},
+ {TPM2_CC_SEQUENCE_UPDATE, 750},
+ {TPM2_CC_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_HASH_SEQUENCE_START, 750},
+ {TPM2_CC_VERIFY_SIGNATURE, 30000},
+ {TPM2_CC_PCR_EXTEND, 750},
+ {TPM2_CC_HIERARCHY_CONTROL, 2000},
+ {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000},
+ {TPM2_CC_GET_CAPABILITY, 750},
+ {TPM2_CC_NV_READ, 2000},
+ {TPM2_CC_CREATE_PRIMARY, 30000},
+ {TPM2_CC_CREATE, 30000},
+ {TPM2_CC_CREATE_LOADED, 30000},
+};
/**
- * tpm2_calc_ordinal_duration() - calculate the maximum command duration
- * @chip: TPM chip to use.
+ * tpm2_calc_ordinal_duration() - Calculate the maximum command duration
* @ordinal: TPM command ordinal.
*
- * The function returns the maximum amount of time the chip could take
- * to return the result for a particular ordinal in jiffies.
- *
- * Return: A maximal duration time for an ordinal in jiffies.
+ * Returns the maximum amount of time the chip is expected by the
+ * kernel to take, in jiffies.
*/
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal)
{
- unsigned int index;
+ int i;
- index = tpm2_ordinal_duration_index(ordinal);
+ for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++)
+ if (ordinal == tpm2_ordinal_duration_map[i].ordinal)
+ return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration);
- if (index != TPM_UNDEFINED)
- return chip->duration[index];
- else
- return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
}
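
Callers now derive poll deadlines straight from the ordinal, as the wait
loop in tpm_try_transmit() does; sketch:

    /* Deadline for TPM2_CC_GET_RANDOM: 2000 ms per the table above. */
    unsigned long stop = jiffies + tpm2_calc_ordinal_duration(TPM2_CC_GET_RANDOM);

    do {
            /* ... poll chip status, break on completion ... */
    } while (time_before(jiffies, stop));
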
-
struct tpm2_pcr_read_out {
__be32 update_cnt;
__be32 pcr_selects_cnt;
@@ -232,18 +188,30 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- rc = tpm2_start_auth_session(chip);
- if (rc)
- return rc;
+ if (!disable_pcr_integrity) {
+ rc = tpm2_start_auth_session(chip);
+ if (rc)
+ return rc;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
if (rc) {
- tpm2_end_auth_session(chip);
+ if (!disable_pcr_integrity)
+ tpm2_end_auth_session(chip);
return rc;
}
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
- tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ if (!disable_pcr_integrity) {
+ rc = tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+ tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ } else {
+ tpm_buf_append_handle(chip, &buf, pcr_idx);
+ tpm_buf_append_auth(chip, &buf, NULL, 0);
+ }
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -253,9 +221,17 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity) {
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+ }
+
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
- rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ if (!disable_pcr_integrity)
+ rc = tpm_buf_check_hmac_response(chip, &buf, rc);
tpm_buf_destroy(&buf);
@@ -281,6 +257,7 @@ struct tpm2_get_random_out {
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
struct tpm2_get_random_out *out;
+ struct tpm_header *head;
struct tpm_buf buf;
u32 recd;
u32 num_bytes = max;
@@ -288,6 +265,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
int total = 0;
int retries = 5;
u8 *dest_ptr = dest;
+ off_t offset;
if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
@@ -304,11 +282,24 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
- tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
- | TPM2_SA_CONTINUE_SESSION,
- NULL, 0);
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, &buf,
+ TPM2_SA_ENCRYPT |
+ TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
+ } else {
+ offset = buf.handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf.data;
+ if (tpm_buf_length(&buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
tpm_buf_append_u16(&buf, num_bytes);
- tpm_buf_fill_hmac_session(chip, &buf);
+ err = tpm_buf_fill_hmac_session(chip, &buf);
+ if (err) {
+ tpm_buf_destroy(&buf);
+ return err;
+ }
+
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
@@ -320,7 +311,13 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
goto out;
}
- out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
+ head = (struct tpm_header *)buf.data;
+ offset = TPM_HEADER_SIZE;
+ /* Skip the parameter size field: */
+ if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS)
+ offset += 4;
+
+ out = (struct tpm2_get_random_out *)&buf.data[offset];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
TPM_HEADER_SIZE +
@@ -337,7 +334,6 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
} while (retries-- && total < max);
tpm_buf_destroy(&buf);
- tpm2_end_auth_session(chip);
return total ? total : -EIO;
out:
@@ -580,11 +576,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
nr_possible_banks = be32_to_cpup(
(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
-
- chip->allocated_banks = kcalloc(nr_possible_banks,
- sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks) {
+ if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
+ pr_err("tpm: out of bank capacity: %u > %u\n",
+ nr_possible_banks, TPM2_MAX_PCR_BANKS);
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index ea8860661876..4149379665c4 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -40,11 +40,6 @@
*
* These are the usage functions:
*
- * tpm2_start_auth_session() which allocates the opaque auth structure
- * and gets a session from the TPM. This must be called before
- * any of the following functions. The session is protected by a
- * session_key which is derived from a random salt value
- * encrypted to the NULL seed.
* tpm2_end_auth_session() kills the session and frees the resources.
* Under normal operation this function is done by
* tpm_buf_check_hmac_response(), so this is only to be used on
@@ -71,17 +66,17 @@
#include "tpm.h"
#include <linux/random.h>
#include <linux/scatterlist.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
-static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
- u32 *handle, u8 *name);
+#define AES_KEY_BYTES AES_KEYSIZE_128
+#define AES_KEY_BITS (AES_KEY_BYTES*8)
/*
* This is the structure that carries all the auth information (like
@@ -145,65 +140,293 @@ struct tpm2_auth {
u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE];
};
+#ifdef CONFIG_TCG_TPM2_HMAC
/*
* Name Size based on TPM algorithm (assumes no hash bigger than 255)
*/
-static u8 name_size(const u8 *name)
+static int name_size(const u8 *name)
{
- static u8 size_map[] = {
- [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
- [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
- [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
- [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
- };
- u16 alg = get_unaligned_be16(name);
- return size_map[alg] + 2;
+ u16 hash_alg = get_unaligned_be16(name);
+
+ switch (hash_alg) {
+ case TPM_ALG_SHA1:
+ return SHA1_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA256:
+ return SHA256_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA384:
+ return SHA384_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA512:
+ return SHA512_DIGEST_SIZE + 2;
+ default:
+ pr_warn("tpm: unsupported name algorithm: 0x%04x\n", hash_alg);
+ return -EINVAL;
+ }
}
-/*
- * It turns out the crypto hmac(sha256) is hard for us to consume
- * because it assumes a fixed key and the TPM seems to change the key
- * on every operation, so we weld the hmac init and final functions in
- * here to give it the same usage characteristics as a regular hash
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name)
+{
+ u32 mso = tpm2_handle_mso(handle);
+ off_t offset = TPM_HEADER_SIZE;
+ int rc, name_size_alg;
+ struct tpm_buf buf;
+
+ if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE &&
+ mso != TPM2_MSO_NVRAM) {
+ memcpy(name, &handle, sizeof(u32));
+ return sizeof(u32);
+ }
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, handle);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic");
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return tpm_ret_to_err(rc);
+ }
+
+ /* Skip TPMT_PUBLIC: */
+ offset += tpm_buf_read_u16(&buf, &offset);
+
+ /*
+ * Ensure space for the length field of TPM2B_NAME and hashAlg field of
+ * TPMT_HA (the extra four bytes).
+ */
+ if (offset + 4 > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ rc = tpm_buf_read_u16(&buf, &offset);
+ name_size_alg = name_size(&buf.data[offset]);
+
+	if (name_size_alg < 0) {
+		tpm_buf_destroy(&buf);
+		return name_size_alg;
+	}
+
+ if (rc != name_size_alg) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ if (offset + rc > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+	memcpy(name, &buf.data[offset], rc);
+	tpm_buf_destroy(&buf);
+	return name_size_alg;
+}
+#endif /* CONFIG_TCG_TPM2_HMAC */
+
+/**
+ * tpm_buf_append_name() - add a handle area to the buffer
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @handle: The handle to be appended
+ * @name: The name of the handle (may be NULL)
+ *
+ * In order to compute session HMACs, we need to know the names of the
+ * objects pointed to by the handles. For most objects, this is simply
+ * the actual 4 byte handle or an empty buf (in these cases @name
+ * should be NULL) but for volatile objects, permanent objects and NV
+ * areas, the name is defined as the hash (according to the name
+ * algorithm which should be set to sha256) of the public area to
+ * which the two byte algorithm id has been appended. For these
+ * objects, the @name pointer should point to this. If a name is
+ * required but @name is NULL, then TPM2_ReadPublic() will be called
+ * on the handle to obtain the name.
+ *
+ * As with most tpm_buf operations, failure indicates an incorrect
+ * programming model and is reported with a kernel message.
+ *
+ * Return: 0 on success, or a negative error code; the authorization
+ * session is ended on failure.
*/
-static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
+#ifdef CONFIG_TCG_TPM2_HMAC
+ enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+ struct tpm2_auth *auth;
+ u16 name_size_alg;
+ int slot;
+ int ret;
+#endif
+
+ if (!tpm2_chip_auth(chip)) {
+ tpm_buf_append_handle(chip, buf, handle);
+ return 0;
+ }
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+ slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
+ if (slot >= AUTH_MAX_NAMES) {
+ dev_err(&chip->dev, "too many handles\n");
+ ret = -EIO;
+ goto err;
+ }
+ auth = chip->auth;
+ if (auth->session != tpm_buf_length(buf)) {
+		dev_err(&chip->dev, "session state malformed\n");
+ ret = -EIO;
+ goto err;
+ }
+ tpm_buf_append_u32(buf, handle);
+ auth->session += 4;
+
+ if (mso == TPM2_MSO_PERSISTENT ||
+ mso == TPM2_MSO_VOLATILE ||
+ mso == TPM2_MSO_NVRAM) {
+ if (!name) {
+ ret = tpm2_read_public(chip, handle, auth->name[slot]);
+ if (ret < 0)
+ goto err;
- sha256_init(sctx);
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_IPAD_VALUE;
+ name_size_alg = ret;
+ }
+ } else {
+ if (name) {
+ dev_err(&chip->dev, "handle 0x%08x does not use a name\n",
+ handle);
+ ret = -EIO;
+ goto err;
+ }
}
- sha256_update(sctx, pad, sizeof(pad));
+
+	auth->name_h[slot] = handle;
+	if (name) {
+		ret = name_size(name);
+		if (ret < 0)
+			goto err;
+		name_size_alg = ret;
+		memcpy(auth->name[slot], name, name_size_alg);
+	}
+#endif
+ return 0;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+err:
+ tpm2_end_auth_session(chip);
+ return tpm_ret_to_err(ret);
+#endif
}
+EXPORT_SYMBOL_GPL(tpm_buf_append_name);
-static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len,
- u8 *out)
+void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 *passphrase, int passphrase_len)
{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ u32 len = 9 + passphrase_len;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphrase_len);
+ tpm_buf_append(buf, passphrase, passphrase_len);
+}
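
Spelled out, the area tpm_buf_append_auth() emits is the canonical
TPM_RS_PW password authorization (sizes from the code above):

    /*
     * u32 len        = 9 + passphrase_len   size of what follows
     * u32 handle     = TPM2_RS_PW
     * u16 nonce_len  = 0
     * u8  attributes = 0
     * u16 auth_len   = passphrase_len, followed by the passphrase bytes
     */
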
+
+/**
+ * tpm_buf_append_hmac_session() - Append a TPM session element
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @attributes: The session attributes
+ * @passphrase: The session authority (NULL if none)
+ * @passphrase_len: The length of the session authority (0 if none)
+ *
+ * This fills in a session structure in the TPM command buffer, except
+ * for the HMAC which cannot be computed until the command buffer is
+ * complete. The type of session is controlled by the @attributes,
+ * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
+ * session won't terminate after tpm_buf_check_hmac_response(),
+ * TPM2_SA_DECRYPT which means this buffer's first parameter should be
+ * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
+ * response buffer's first parameter needs to be decrypted (confusing,
+ * but the defines are written from the point of view of the TPM).
+ *
+ * Any session appended by this command must be finalized by calling
+ * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
+ * and the TPM will reject the command.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphrase_len)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ u8 nonce[SHA256_DIGEST_SIZE];
+ struct tpm2_auth *auth;
+ u32 len;
+#endif
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_OPAD_VALUE;
+ if (!tpm2_chip_auth(chip)) {
+ tpm_buf_append_auth(chip, buf, passphrase, passphrase_len);
+ return;
}
- /* collect the final hash; use out as temporary storage */
- sha256_final(sctx, out);
+#ifdef CONFIG_TCG_TPM2_HMAC
+ /* The first write to /dev/tpm{rm0} will flush the session. */
+ attributes |= TPM2_SA_CONTINUE_SESSION;
+
+ /*
+ * The Architecture Guide requires us to strip trailing zeros
+ * before computing the HMAC
+ */
+ while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0')
+ passphrase_len--;
- sha256_init(sctx);
- sha256_update(sctx, pad, sizeof(pad));
- sha256_update(sctx, out, SHA256_DIGEST_SIZE);
- sha256_final(sctx, out);
+ auth = chip->auth;
+ auth->attrs = attributes;
+ auth->passphrase_len = passphrase_len;
+ if (passphrase_len)
+ memcpy(auth->passphrase, passphrase, passphrase_len);
+
+ if (auth->session != tpm_buf_length(buf)) {
+ /* we're not the first session */
+ len = get_unaligned_be32(&buf->data[auth->session]);
+ if (4 + len + auth->session != tpm_buf_length(buf)) {
+ WARN(1, "session length mismatch, cannot append");
+ return;
+ }
+
+ /* add our new session */
+ len += 9 + 2 * SHA256_DIGEST_SIZE;
+ put_unaligned_be32(len, &buf->data[auth->session]);
+ } else {
+ tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
+ }
+
+ /* random number for our nonce */
+ get_random_bytes(nonce, sizeof(nonce));
+ memcpy(auth->our_nonce, nonce, sizeof(nonce));
+ tpm_buf_append_u32(buf, auth->handle);
+ /* our new nonce */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+ tpm_buf_append_u8(buf, auth->attrs);
+ /* and put a placeholder for the hmac */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+#endif
}
+EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+ u32 *handle, u8 *name);
/*
* assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
@@ -216,16 +439,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_state sctx;
+ struct hmac_sha256_ctx hctx;
__be32 c = cpu_to_be32(counter);
- tpm2_hmac_init(&sctx, key, key_len);
- sha256_update(&sctx, (u8 *)&c, sizeof(c));
- sha256_update(&sctx, label, strlen(label)+1);
- sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
- tpm2_hmac_final(&sctx, key, key_len, out);
+ hmac_sha256_init_usingrawkey(&hctx, key, key_len);
+ hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
+ hmac_sha256_update(&hctx, label, strlen(label) + 1);
+ hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
+ hmac_sha256_final(&hctx, out);
bytes -= SHA256_DIGEST_SIZE;
counter++;
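
The construction above is an SP 800-108 counter-mode KDF; as a formula
(the label's trailing NUL is deliberately hashed, matching
strlen(label) + 1 above):

    /*
     * out[i] = HMAC-SHA256(key,
     *                      be32(i) || label || 0x00 || u || v || be32(bits))
     * for i = 1, 2, ... until 'bytes' of output have been produced.
     */
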
@@ -243,7 +466,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
u8 *out)
{
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
/*
* this should be an iterative counter, but because we know
* we're only taking 32 bytes for the point using a sha256
@@ -263,7 +486,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
sha256_final(&sctx, out);
}
-static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
+static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
+ struct tpm2_auth *auth)
{
struct crypto_kpp *kpp;
struct kpp_request *req;
@@ -322,7 +546,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
kpp_request_set_input(req, s, EC_PT_SZ*2);
- sg_init_one(d, chip->auth->salt, EC_PT_SZ);
+ sg_init_one(d, auth->salt, EC_PT_SZ);
kpp_request_set_output(req, d, EC_PT_SZ);
crypto_kpp_compute_shared_secret(req);
kpp_request_free(req);
@@ -333,90 +557,13 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
* This works because KDFe fully consumes the secret before it
* writes the salt
*/
- tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
- chip->auth->salt);
+ tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt);
out:
crypto_free_kpp(kpp);
}
/**
- * tpm_buf_append_hmac_session() - Append a TPM session element
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @attributes: The session attributes
- * @passphrase: The session authority (NULL if none)
- * @passphrase_len: The length of the session authority (0 if none)
- *
- * This fills in a session structure in the TPM command buffer, except
- * for the HMAC which cannot be computed until the command buffer is
- * complete. The type of session is controlled by the @attributes,
- * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
- * session won't terminate after tpm_buf_check_hmac_response(),
- * TPM2_SA_DECRYPT which means this buffers first parameter should be
- * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
- * response buffer's first parameter needs to be decrypted (confusing,
- * but the defines are written from the point of view of the TPM).
- *
- * Any session appended by this command must be finalized by calling
- * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
- * and the TPM will reject the command.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase,
- int passphrase_len)
-{
- u8 nonce[SHA256_DIGEST_SIZE];
- u32 len;
- struct tpm2_auth *auth = chip->auth;
-
- /*
- * The Architecture Guide requires us to strip trailing zeros
- * before computing the HMAC
- */
- while (passphrase && passphrase_len > 0
- && passphrase[passphrase_len - 1] == '\0')
- passphrase_len--;
-
- auth->attrs = attributes;
- auth->passphrase_len = passphrase_len;
- if (passphrase_len)
- memcpy(auth->passphrase, passphrase, passphrase_len);
-
- if (auth->session != tpm_buf_length(buf)) {
- /* we're not the first session */
- len = get_unaligned_be32(&buf->data[auth->session]);
- if (4 + len + auth->session != tpm_buf_length(buf)) {
- WARN(1, "session length mismatch, cannot append");
- return;
- }
-
- /* add our new session */
- len += 9 + 2 * SHA256_DIGEST_SIZE;
- put_unaligned_be32(len, &buf->data[auth->session]);
- } else {
- tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
- }
-
- /* random number for our nonce */
- get_random_bytes(nonce, sizeof(nonce));
- memcpy(auth->our_nonce, nonce, sizeof(nonce));
- tpm_buf_append_u32(buf, auth->handle);
- /* our new nonce */
- tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
- tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
- tpm_buf_append_u8(buf, auth->attrs);
- /* and put a placeholder for the hmac */
- tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
- tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
-}
-EXPORT_SYMBOL(tpm_buf_append_hmac_session);
-
-/**
* tpm_buf_fill_hmac_session() - finalize the session HMAC
* @chip: the TPM chip structure
* @buf: The buffer to be appended
@@ -430,11 +577,9 @@ EXPORT_SYMBOL(tpm_buf_append_hmac_session);
* encryption key and encrypts the first parameter of the command
* buffer with it.
*
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
+ * Ends the authorization session on failure.
*/
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
{
u32 cc, handles, val;
struct tpm2_auth *auth = chip->auth;
@@ -444,7 +589,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 *hmac = NULL;
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
+ int ret;
+
+ if (!auth) {
+ ret = -EIO;
+ goto err;
+ }
/* save the command code in BE format */
auth->ordinal = head->ordinal;
@@ -453,9 +605,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
i = tpm2_find_cc(chip, cc);
if (i < 0) {
- dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
- return;
+ dev_err(&chip->dev, "command 0x%08x not found\n", cc);
+ ret = -EIO;
+ goto err;
}
+
attrs = chip->cc_attrs_tbl[i];
handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
@@ -469,9 +623,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 handle = tpm_buf_read_u32(buf, &offset_s);
if (auth->name_h[i] != handle) {
- dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
- i);
- return;
+ dev_err(&chip->dev, "invalid handle 0x%08x\n", handle);
+ ret = -EIO;
+ goto err;
}
}
/* point offset_s to the start of the sessions */
@@ -502,12 +656,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
offset_s += len;
}
if (offset_s != offset_p) {
- dev_err(&chip->dev, "TPM session length is incorrect\n");
- return;
+ dev_err(&chip->dev, "session length is incorrect\n");
+ ret = -EIO;
+ goto err;
}
if (!hmac) {
- dev_err(&chip->dev, "TPM could not find HMAC session\n");
- return;
+ dev_err(&chip->dev, "could not find HMAC session\n");
+ ret = -EIO;
+ goto err;
}
/* encrypt before HMAC */
@@ -539,8 +695,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- sha256_update(&sctx, auth->name[i],
- name_size(auth->name[i]));
+ ret = name_size(auth->name[i]);
+ if (ret < 0)
+ goto err;
+
+ sha256_update(&sctx, auth->name[i], ret);
} else {
__be32 h = cpu_to_be32(auth->name_h[i]);
@@ -553,114 +712,21 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
sha256_final(&sctx, cphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, cphash, sizeof(cphash));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, hmac);
-}
-EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
-
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- off_t offset = TPM_HEADER_SIZE;
- u32 tot_len = be32_to_cpu(head->length);
- u32 val;
-
- /* we're starting after the header so adjust the length */
- tot_len -= TPM_HEADER_SIZE;
-
- /* skip public */
- val = tpm_buf_read_u16(buf, &offset);
- if (val > tot_len)
- return -EINVAL;
- offset += val;
- /* name */
- val = tpm_buf_read_u16(buf, &offset);
- if (val != name_size(&buf->data[offset]))
- return -EINVAL;
- memcpy(name, &buf->data[offset], val);
- /* forget the rest */
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, cphash, sizeof(cphash));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
+ hmac_sha256_final(&hctx, hmac);
return 0;
-}
-
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
-{
- struct tpm_buf buf;
- int rc;
- rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
- if (rc == TPM2_RC_SUCCESS)
- rc = tpm2_parse_read_public(name, &buf);
-
- tpm_buf_destroy(&buf);
-
- return rc;
+err:
+ tpm2_end_auth_session(chip);
+ return ret;
}
-
-/**
- * tpm_buf_append_name() - add a handle area to the buffer
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @handle: The handle to be appended
- * @name: The name of the handle (may be NULL)
- *
- * In order to compute session HMACs, we need to know the names of the
- * objects pointed to by the handles. For most objects, this is simply
- * the actual 4 byte handle or an empty buf (in these cases @name
- * should be NULL) but for volatile objects, permanent objects and NV
- * areas, the name is defined as the hash (according to the name
- * algorithm which should be set to sha256) of the public area to
- * which the two byte algorithm id has been appended. For these
- * objects, the @name pointer should point to this. If a name is
- * required but @name is NULL, then TPM2_ReadPublic() will be called
- * on the handle to obtain the name.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name)
-{
- enum tpm2_mso_type mso = tpm2_handle_mso(handle);
- struct tpm2_auth *auth = chip->auth;
- int slot;
-
- slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4;
- if (slot >= AUTH_MAX_NAMES) {
- dev_err(&chip->dev, "TPM: too many handles\n");
- return;
- }
- WARN(auth->session != tpm_buf_length(buf),
- "name added in wrong place\n");
- tpm_buf_append_u32(buf, handle);
- auth->session += 4;
-
- if (mso == TPM2_MSO_PERSISTENT ||
- mso == TPM2_MSO_VOLATILE ||
- mso == TPM2_MSO_NVRAM) {
- if (!name)
- tpm2_read_public(chip, handle, auth->name[slot]);
- } else {
- if (name)
- dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
- }
-
- auth->name_h[slot] = handle;
- if (name)
- memcpy(auth->name[slot], name, name_size(name));
-}
-EXPORT_SYMBOL(tpm_buf_append_name);
+EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
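
For reference, the HMAC assembled above follows the TPM 2.0 authHMAC
definition: the key is the session key concatenated with the
NUL-stripped passphrase, and the message is cpHash followed by the
caller's nonce, the TPM's nonce, and the attribute byte; the response
check below reuses the construction with rpHash and the nonce order
reversed. A sketch of the sizes involved (SHA-256 sessions, passphrase
assumed empty):

    #include <stdio.h>

    int main(void)
    {
            unsigned digest = 32;           /* SHA256_DIGEST_SIZE */
            unsigned passphrase_len = 0;    /* assumed empty auth value */
            unsigned key_len = digest + passphrase_len;
            unsigned msg_len = digest + digest + digest + 1;

            printf("HMAC key %u bytes, message %u bytes\n", key_len, msg_len);
            return 0;
    }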
/**
* tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness
@@ -696,12 +762,17 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
struct tpm2_auth *auth = chip->auth;
off_t offset_s, offset_p;
u8 rphash[SHA256_DIGEST_SIZE];
- u32 attrs;
- struct sha256_state sctx;
+ u32 attrs, cc;
+ struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
u16 tag = be16_to_cpu(head->tag);
- u32 cc = be32_to_cpu(auth->ordinal);
int parm_len, len, i, handles;
+ if (!auth)
+ return rc;
+
+ cc = be32_to_cpu(auth->ordinal);
+
if (auth->session >= TPM_HEADER_SIZE) {
WARN(1, "tpm session not filled correctly\n");
goto out;
@@ -763,21 +834,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
sha256_final(&sctx, rphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, rphash, sizeof(rphash));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, rphash, sizeof(rphash));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
/* we're done with the rphash, so put our idea of the hmac there */
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, rphash);
- if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
- rc = 0;
- } else {
+ hmac_sha256_final(&hctx, rphash);
+ if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) {
dev_err(&chip->dev, "TPM: HMAC check failed\n");
goto out;
}
+ rc = 0;
/* now do response decryption */
if (auth->attrs & TPM2_SA_ENCRYPT) {
@@ -799,7 +869,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
if (rc)
/* manually close the session if it wasn't consumed */
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+
+ kfree_sensitive(auth);
+ chip->auth = NULL;
} else {
/* reset for next use */
auth->session = TPM_HEADER_SIZE;
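
The HMAC comparison above uses crypto_memneq() rather than memcmp() so
that the time taken does not depend on how many leading bytes match,
closing a timing side channel. A minimal sketch of the idea (the
kernel's crypto_memneq() is additionally hardened against compiler
optimization):

    #include <stddef.h>

    static int memneq_sketch(const void *a, const void *b, size_t n)
    {
            const unsigned char *pa = a, *pb = b;
            unsigned char diff = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    diff |= pa[i] ^ pb[i];  /* no data-dependent early exit */

            return diff != 0;
    }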
@@ -821,8 +893,14 @@ EXPORT_SYMBOL(tpm_buf_check_hmac_response);
*/
void tpm2_end_auth_session(struct tpm_chip *chip)
{
- tpm2_flush_context(chip, chip->auth->handle);
- memzero_explicit(chip->auth, sizeof(*chip->auth));
+ struct tpm2_auth *auth = chip->auth;
+
+ if (!auth)
+ return;
+
+ tpm2_flush_context(chip, auth->handle);
+ kfree_sensitive(auth);
+ chip->auth = NULL;
}
EXPORT_SYMBOL(tpm2_end_auth_session);
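
Replacing memzero_explicit() on the embedded struct with
kfree_sensitive() on the now heap-allocated auth preserves the same
guarantee: the session secrets are wiped before the memory is freed.
Roughly, kfree_sensitive() behaves like this sketch (the real helper
also tolerates NULL and uses the allocator's view of the size):

    static void kfree_sensitive_sketch(const void *p)
    {
            size_t ks = ksize((void *)p);   /* full allocated size */

            if (ks)
                    memzero_explicit((void *)p, ks);  /* wipe secrets first */
            kfree(p);
    }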
@@ -856,53 +934,66 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
{
- int rc;
unsigned int offset = 0; /* dummy offset for null seed context */
u8 name[SHA256_DIGEST_SIZE + 2];
+ u32 tmp_null_key;
+ int rc;
rc = tpm2_load_context(chip, chip->null_key_context, &offset,
- null_key);
- if (rc != -EINVAL)
- return rc;
+ &tmp_null_key);
+ if (rc != -EINVAL) {
+ if (!rc)
+ *null_key = tmp_null_key;
+ goto err;
+ }
- /* an integrity failure may mean the TPM has been reset */
- dev_err(&chip->dev, "NULL key integrity failure!\n");
- /* check the null name against what we know */
- tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
- if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
- /* name unchanged, assume transient integrity failure */
- return rc;
- /*
- * Fatal TPM failure: the NULL seed has actually changed, so
- * the TPM must have been illegally reset. All in-kernel TPM
- * operations will fail because the NULL primary can't be
- * loaded to salt the sessions, but disable the TPM anyway so
- * userspace programmes can't be compromised by it.
- */
- dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
- chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ /* Try to re-create null key, given the integrity failure: */
+ rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
+ if (rc)
+ goto err;
+
+ /* Return null key if the name has not been changed: */
+ if (!memcmp(name, chip->null_key_name, sizeof(name))) {
+ *null_key = tmp_null_key;
+ return 0;
+ }
+
+	/* A changed name implies interference with the TPM: */
+ dev_err(&chip->dev, "null key integrity check failed\n");
+ tpm2_flush_context(chip, tmp_null_key);
+err:
+ if (rc) {
+ chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ rc = -ENODEV;
+ }
return rc;
}
/**
- * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
- * @chip: the TPM chip structure to create the session with
+ * tpm2_start_auth_session() - Create an HMAC authentication session
+ * @chip: A TPM chip
*
- * This function loads the NULL seed from its saved context and starts
- * an authentication session on the null seed, fills in the
- * @chip->auth structure to contain all the session details necessary
- * for performing the HMAC, encrypt and decrypt operations and
- * returns. The NULL seed is flushed before this function returns.
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
+ * session. The null seed is flushed before the return.
*
- * Return: zero on success or actual error encountered.
+ * Returns zero on success, or a POSIX error code.
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
+ struct tpm2_auth *auth;
struct tpm_buf buf;
- struct tpm2_auth *auth = chip->auth;
- int rc;
u32 null_key;
+ int rc;
+
+ if (chip->auth) {
+ dev_dbg_once(&chip->dev, "auth session is active\n");
+ return 0;
+ }
+
+ auth = kzalloc(sizeof(*auth), GFP_KERNEL);
+ if (!auth)
+ return -ENOMEM;
rc = tpm2_load_null(chip, &null_key);
if (rc)
@@ -924,7 +1015,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
/* append encrypted salt and squirrel away unencrypted in auth */
- tpm_buf_append_salt(&buf, chip);
+ tpm_buf_append_salt(&buf, chip, auth);
/* session type (HMAC, audit or policy) */
tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
@@ -938,7 +1029,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
/* hash algorithm for session */
tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
- rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
tpm2_flush_context(chip, null_key);
if (rc == TPM2_RC_SUCCESS)
@@ -946,14 +1037,31 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_destroy(&buf);
- if (rc)
- goto out;
+ if (rc == TPM2_RC_SUCCESS) {
+ chip->auth = auth;
+ return 0;
+ }
- out:
+out:
+ kfree_sensitive(auth);
return rc;
}
EXPORT_SYMBOL(tpm2_start_auth_session);
+/*
+ * A mask containing the object attributes for the kernel held null primary key
+ * used in HMAC encryption. For more information on the individual
+ * attributes, see section 8.3 "TPMA_OBJECT (Object Attributes)" in the
+ * TPM 2.0 Library Specification, Part 2: Structures.
+ */
+#define TPM2_OA_NULL_KEY ( \
+ TPM2_OA_NO_DA | \
+ TPM2_OA_FIXED_TPM | \
+ TPM2_OA_FIXED_PARENT | \
+ TPM2_OA_SENSITIVE_DATA_ORIGIN | \
+ TPM2_OA_USER_WITH_AUTH | \
+ TPM2_OA_DECRYPT | \
+ TPM2_OA_RESTRICTED)
+
/**
* tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
*
@@ -1018,7 +1126,7 @@ static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
val = tpm_buf_read_u32(buf, &offset_t);
/* object properties */
- if (val != TPM2_OA_TMPL)
+ if (val != TPM2_OA_NULL_KEY)
return -EINVAL;
/* auth policy (empty) */
@@ -1178,7 +1286,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
tpm_buf_append_u16(&template, TPM_ALG_SHA256);
/* object properties */
- tpm_buf_append_u32(&template, TPM2_OA_TMPL);
+ tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY);
/* sauth policy (empty) */
tpm_buf_append_u16(&template, 0);
@@ -1269,18 +1377,22 @@ static int tpm2_create_null_primary(struct tpm_chip *chip)
*
* Derive and context save the null primary and allocate memory in the
* struct tpm_chip for the authorizations.
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
*/
int tpm2_sessions_init(struct tpm_chip *chip)
{
int rc;
rc = tpm2_create_null_primary(chip);
- if (rc)
- dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
-
- chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
- if (!chip->auth)
- return -ENOMEM;
+ if (rc) {
+ dev_err(&chip->dev, "null key creation failed with %d\n", rc);
+ return rc;
+ }
return rc;
}
+#endif /* CONFIG_TCG_TPM2_HMAC */
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 4892d491da8d..60354cd53b5c 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -12,7 +12,7 @@
*/
#include <linux/gfp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "tpm.h"
enum tpm2_handle_types {
@@ -169,6 +169,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
struct tpm_space *space = &chip->work_space;
int i;
+ if (!space)
+ return;
+
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context(chip, space->context_tbl[i]);
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 9fb2defa9dc4..f25faf468bba 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -15,7 +15,66 @@
*/
#include "tpm.h"
-#include "tpm_atmel.h"
+
+struct tpm_atmel_priv {
+ int region_size;
+ int have_region;
+ unsigned long base;
+ void __iomem *iobase;
+};
+
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + (offset))
+#define atmel_putb(val, chip, offset) \
+ outb(val, atmel_get_priv(chip)->base + (offset))
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
+ TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base + 1) & 0xFF;
+}
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+ tpm_read_index(TPM_ADDR, 5) != 'T' ||
+ tpm_read_index(TPM_ADDR, 6) != 'M' ||
+ tpm_read_index(TPM_ADDR, 7) != 'L')
+ return 1;
+
+ /* query chip for its version number */
+ if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+ tpm_read_index(TPM_ADDR, 0x01) != 1)
+ return 1;
+
+ /* This is an atmel supported part */
+ return 0;
+}
+
+/* Determine where to talk to device */
+static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ int lo, hi;
+
+ if (atmel_verify_tpm11() != 0)
+ return NULL;
+
+ lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+ hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+ *base = (hi << 8) | lo;
+ *region_size = 2;
+
+ return ioport_map(*base, *region_size);
+}
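
Device discovery goes through the legacy TPM_ADDR index/data port pair:
index registers 4..7 must read back 'A', 'T', 'M', 'L', and the I/O
base is then assembled from the two base-address index registers. A
worked example with hypothetical register values:

    #include <stdio.h>

    int main(void)
    {
            int lo = 0x00, hi = 0x4e;       /* hypothetical LO/HI reads */

            /* as in atmel_get_base_addr(): 2-byte region at (hi << 8) | lo */
            printf("I/O base 0x%04x, region size 2\n", (hi << 8) | lo);
            return 0;
    }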
/* write status bits */
enum tpm_atmel_write_status {
@@ -89,7 +148,8 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
int i;
@@ -142,7 +202,6 @@ static void atml_plat_remove(void)
tpm_chip_unregister(chip);
if (priv->have_region)
atmel_release_region(priv->base, priv->region_size);
- atmel_put_base_addr(priv->iobase);
platform_device_unregister(pdev);
}
@@ -211,7 +270,6 @@ static int __init init_atmel(void)
err_unreg_dev:
platform_device_unregister(pdev);
err_rel_reg:
- atmel_put_base_addr(iobase);
if (have_region)
atmel_release_region(base,
region_size);
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
deleted file mode 100644
index 7ac3f69dcf0f..000000000000
--- a/drivers/char/tpm/tpm_atmel.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2005 IBM Corporation
- *
- * Authors:
- * Kylene Hall <kjhall@us.ibm.com>
- *
- * Maintained by: <tpmdd-devel@lists.sourceforge.net>
- *
- * Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
- *
- * These difference are required on power because the device must be
- * discovered through the device tree and iomap must be used to get
- * around the need for holes in the io_page_mask. This does not happen
- * automatically because the tpm is not a normal pci device and lives
- * under the root node.
- */
-
-struct tpm_atmel_priv {
- int region_size;
- int have_region;
- unsigned long base;
- void __iomem *iobase;
-};
-
-#ifdef CONFIG_PPC64
-
-#include <linux/of.h>
-
-#define atmel_getb(priv, offset) readb(priv->iobase + offset)
-#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
-#define atmel_request_region request_mem_region
-#define atmel_release_region release_mem_region
-
-static inline void atmel_put_base_addr(void __iomem *iobase)
-{
- iounmap(iobase);
-}
-
-static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
-{
- struct device_node *dn;
- unsigned long address, size;
- const unsigned int *reg;
- int reglen;
- int naddrc;
- int nsizec;
-
- dn = of_find_node_by_name(NULL, "tpm");
-
- if (!dn)
- return NULL;
-
- if (!of_device_is_compatible(dn, "AT97SC3201")) {
- of_node_put(dn);
- return NULL;
- }
-
- reg = of_get_property(dn, "reg", &reglen);
- naddrc = of_n_addr_cells(dn);
- nsizec = of_n_size_cells(dn);
-
- of_node_put(dn);
-
-
- if (naddrc == 2)
- address = ((unsigned long) reg[0] << 32) | reg[1];
- else
- address = reg[0];
-
- if (nsizec == 2)
- size =
- ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
- else
- size = reg[naddrc];
-
- *base = address;
- *region_size = size;
- return ioremap(*base, *region_size);
-}
-#else
-#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
-#define atmel_putb(val, chip, offset) \
- outb(val, atmel_get_priv(chip)->base + offset)
-#define atmel_request_region request_region
-#define atmel_release_region release_region
-/* Atmel definitions */
-enum tpm_atmel_addr {
- TPM_ATMEL_BASE_ADDR_LO = 0x08,
- TPM_ATMEL_BASE_ADDR_HI = 0x09
-};
-
-static inline int tpm_read_index(int base, int index)
-{
- outb(index, base);
- return inb(base+1) & 0xFF;
-}
-
-/* Verify this is a 1.1 Atmel TPM */
-static int atmel_verify_tpm11(void)
-{
-
- /* verify that it is an Atmel part */
- if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
- tpm_read_index(TPM_ADDR, 5) != 'T' ||
- tpm_read_index(TPM_ADDR, 6) != 'M' ||
- tpm_read_index(TPM_ADDR, 7) != 'L')
- return 1;
-
- /* query chip for its version number */
- if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
- tpm_read_index(TPM_ADDR, 0x01) != 1)
- return 1;
-
- /* This is an atmel supported part */
- return 0;
-}
-
-static inline void atmel_put_base_addr(void __iomem *iobase)
-{
-}
-
-/* Determine where to talk to device */
-static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
-{
- int lo, hi;
-
- if (atmel_verify_tpm11() != 0)
- return NULL;
-
- lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
- hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
-
- *base = (hi << 8) | lo;
- *region_size = 2;
-
- return ioport_map(*base, *region_size);
-}
-#endif
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index ea085b14ab7c..6c25305c256e 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#ifdef CONFIG_ARM64
#include <linux/arm-smccc.h>
#endif
+#include "tpm_crb_ffa.h"
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -100,6 +101,8 @@ struct crb_priv {
u32 smc_func_id;
u32 __iomem *pluton_start_addr;
u32 __iomem *pluton_reply_addr;
+ u8 ffa_flags;
+ u8 ffa_attributes;
};
struct tpm2_crb_smc {
@@ -110,11 +113,29 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
+/* CRB over FFA start method parameters in TCG2 ACPI table */
+struct tpm2_crb_ffa {
+ u8 flags;
+ u8 attributes;
+ u16 partition_id;
+ u8 reserved[8];
+};
+
struct tpm2_crb_pluton {
u64 start_addr;
u64 reply_addr;
};
+/*
+ * Returns true if the start method supports idle.
+ */
+static inline bool tpm_crb_has_idle(u32 start_method)
+{
+ return !(start_method == ACPI_TPM2_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+}
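
The helper centralizes the open-coded start-method checks that
__crb_go_idle() and __crb_cmd_ready() below previously carried; both now
early-return through it for start methods without idle support. The
replaced check was equivalent to this fragment:

    if (priv->sm == ACPI_TPM2_START_METHOD ||
        priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
        priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)
            return 0;       /* i.e. !tpm_crb_has_idle(priv->sm) */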
+
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -158,6 +179,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* @dev: crb device
* @priv: crb private data
+ * @loc: locality
*
* Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
* The device should respond within TIMEOUT_C by clearing the bit.
@@ -169,17 +191,21 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* Return: 0 always
*/
-static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -200,7 +226,7 @@ static int crb_go_idle(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_go_idle(dev, priv);
+ return __crb_go_idle(dev, priv, chip->locality);
}
/**
@@ -208,6 +234,7 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* @dev: crb device
* @priv: crb private data
+ * @loc: locality
*
* Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
* and poll till the device acknowledge it by clearing the bit.
@@ -218,17 +245,21 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* Return: 0 on success -ETIME on timeout;
*/
-static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -249,19 +280,26 @@ static int crb_cmd_ready(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_cmd_ready(dev, priv);
+ return __crb_cmd_ready(dev, priv, chip->locality);
}
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
@@ -281,14 +319,21 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
static int __crb_relinquish_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
@@ -369,7 +414,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
#ifdef CONFIG_ARM64
/*
* This is a TPM Command Response Buffer start method that invokes a
- * Secure Monitor Call to requrest the firmware to execute or cancel
+ * Secure Monitor Call to request the firmware to execute or cancel
* a TPM 2.0 command.
*/
static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
@@ -394,7 +439,7 @@ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
}
#endif
-static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
@@ -412,7 +457,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* Seems to be necessary for every command */
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
- __crb_cmd_ready(&chip->dev, priv);
+ __crb_cmd_ready(&chip->dev, priv, chip->locality);
memcpy_toio(priv->cmd, buf, len);
@@ -423,13 +468,13 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
* report only ACPI start but in practice seems to require both
* CRB start, hence invoking CRB start method if hid == MSFT0101.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
- (!strcmp(priv->hid, "MSFT0101")))
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED ||
+ !strcmp(priv->hid, "MSFT0101"))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ if (priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
rc = crb_do_acpi_start(chip);
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
@@ -437,6 +482,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ }
+
if (rc)
return rc;
@@ -446,13 +496,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
static void crb_cancel(struct tpm_chip *chip)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ int rc;
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if (((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ if ((priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) &&
crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ if (rc)
+ dev_err(&chip->dev, "FF-A Start failed\n");
+ }
}
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
@@ -609,8 +666,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED) {
if (iores &&
buf->control_address == iores->start +
sizeof(*priv->regs_h))
@@ -627,7 +685,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = __crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv, 0);
if (ret)
goto out_relinquish_locality;
@@ -699,7 +757,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- __crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv, 0);
out_relinquish_locality:
@@ -731,6 +789,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_ffa *crb_ffa;
struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
@@ -769,6 +828,27 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
+ if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_ffa))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_CRB_WITH_ARM_FFA);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_ffa = ACPI_ADD_PTR(struct tpm2_crb_ffa, buf, sizeof(*buf));
+ priv->ffa_flags = crb_ffa->flags;
+ priv->ffa_attributes = crb_ffa->attributes;
+ rc = tpm_crb_ffa_init();
+ if (rc) {
+ /* If FF-A driver is not available yet, request probe retry */
+ if (rc == -ENOENT)
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
+ }
+
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
dev_err(dev,
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
new file mode 100644
index 000000000000..755b77b32ea4
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+
+#define pr_fmt(fmt) "CRB_FFA: " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include "tpm_crb_ffa.h"
+
+static unsigned int busy_timeout_ms = 2000;
+
+module_param(busy_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(busy_timeout_ms,
+ "Maximum time in ms to retry before giving up on busy");
+
+/* TPM service function status codes */
+#define CRB_FFA_OK 0x05000001
+#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002
+#define CRB_FFA_NOFUNC 0x8e000001
+#define CRB_FFA_NOTSUP 0x8e000002
+#define CRB_FFA_INVARG 0x8e000005
+#define CRB_FFA_INV_CRB_CTRL_DATA 0x8e000006
+#define CRB_FFA_ALREADY 0x8e000009
+#define CRB_FFA_DENIED 0x8e00000a
+#define CRB_FFA_NOMEM 0x8e00000b
+
+#define CRB_FFA_VERSION_MAJOR 1
+#define CRB_FFA_VERSION_MINOR 0
+
+/* version encoding */
+#define CRB_FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define CRB_FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define CRB_FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MAJOR_VERSION_MASK, (x))))
+#define CRB_FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MINOR_VERSION_MASK, (x))))
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 or AArch64 SMC calling convention with register usage
+ * as defined in FF-A specification:
+ * w0: Function ID
+ * -for 32-bit: 0x8400006F or 0x84000070
+ * -for 64-bit: 0xC400006F or 0xC4000070
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+/*
+ * Returns the version of the interface that is available
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_GET_INTERFACE_VERSION
+ * w5-w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5: TPM service interface version
+ * Bits[31:16]: major version
+ * Bits[15:0]: minor version
+ * w6-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK_RESULTS_RETURNED: The version of the interface has been
+ * returned.
+ */
+#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
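
On success, w5 packs the major version into bits 30:16 and the minor
version into bits 15:0, matching the masks defined above. Decoding a
hypothetical returned value:

    u32 w5 = 0x00010000;                    /* hypothetical: ABI 1.0 */
    u16 major = CRB_FFA_MAJOR_VERSION(w5);  /* FIELD_GET of bits 30:16 -> 1 */
    u16 minor = CRB_FFA_MINOR_VERSION(w5);  /* FIELD_GET of bits 15:0  -> 0 */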
+
+/*
+ * Notifies the TPM service that a TPM command or TPM locality request is
+ * ready to be processed, and allows the TPM service to process it.
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_START
+ * w5: Start function qualifier
+ * Bits[31:8] (MBZ)
+ * Bits[7:0]
+ * 0: Notifies TPM that a command is ready to be processed
+ * 1: Notifies TPM that a locality request is ready to be processed
+ * w6: TPM locality, one of 0..4
+ * -If the start function qualifier is 0, identifies the locality
+ * from where the command originated.
+ * -If the start function qualifier is 1, identifies the locality
+ * of the locality request
+ * w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK: the TPM service has been notified successfully
+ * CRB_FFA_INVARG: one or more arguments are not valid
+ * CRB_FFA_INV_CRB_CTRL_DATA: CRB control data or locality control
+ * data at the given TPM locality is not valid
+ * CRB_FFA_DENIED: the TPM has previously disabled locality requests and
+ * command processing at the given locality
+ */
+#define CRB_FFA_START 0x0f000201
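
In terms of the wrapper exported later in this file, a command-ready
doorbell for locality 0 maps onto this ABI as (sketch):

    rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, 0); /* w5 = 0, w6 = 0 */
    if (rc)
            return rc;      /* service status already mapped to -errno */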
+
+struct tpm_crb_ffa {
+ struct ffa_device *ffa_dev;
+ u16 major_version;
+ u16 minor_version;
+ /* lock to protect sending of FF-A messages: */
+ struct mutex msg_data_lock;
+ union {
+ struct ffa_send_direct_data direct_msg_data;
+ struct ffa_send_direct_data2 direct_msg_data2;
+ };
+};
+
+static struct tpm_crb_ffa *tpm_crb_ffa;
+static struct ffa_driver tpm_crb_ffa_driver;
+
+static int tpm_crb_ffa_to_linux_errno(int errno)
+{
+ int rc;
+
+ switch (errno) {
+ case CRB_FFA_OK:
+ rc = 0;
+ break;
+ case CRB_FFA_OK_RESULTS_RETURNED:
+ rc = 0;
+ break;
+ case CRB_FFA_NOFUNC:
+ rc = -ENOENT;
+ break;
+ case CRB_FFA_NOTSUP:
+ rc = -EPERM;
+ break;
+ case CRB_FFA_INVARG:
+ rc = -EINVAL;
+ break;
+ case CRB_FFA_INV_CRB_CTRL_DATA:
+ rc = -ENOEXEC;
+ break;
+ case CRB_FFA_ALREADY:
+ rc = -EEXIST;
+ break;
+ case CRB_FFA_DENIED:
+ rc = -EACCES;
+ break;
+ case CRB_FFA_NOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm_crb_ffa_init() - called by the CRB driver to do any needed initialization
+ *
+ * This function is called by the tpm_crb driver during the tpm_crb
+ * driver's initialization. If the tpm_crb_ffa has not been probed
+ * yet, returns -ENOENT in order to force a retry. If the ffa-crb
+ * driver had been probed but failed with an error, returns -ENODEV
+ * in order to prevent further retries.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_init(void)
+{
+ int ret = 0;
+
+ if (!IS_MODULE(CONFIG_TCG_ARM_CRB_FFA)) {
+ ret = ffa_register(&tpm_crb_ffa_driver);
+ if (ret) {
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return ret;
+ }
+ }
+
+ if (!tpm_crb_ffa)
+ ret = -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ ret = -ENODEV;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
+
+static int __tpm_crb_ffa_try_send_receive(unsigned long func_id,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2)
+{
+ const struct ffa_msg_ops *msg_ops;
+ int ret;
+
+ msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
+
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ tpm_crb_ffa->direct_msg_data2 = (struct ffa_send_direct_data2){
+ .data = { func_id, a0, a1, a2 },
+ };
+
+ ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data2);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
+ } else {
+ tpm_crb_ffa->direct_msg_data = (struct ffa_send_direct_data){
+ .data1 = func_id,
+ .data2 = a0,
+ .data3 = a1,
+ .data4 = a2,
+ };
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+ }
+
+ return ret;
+}
+
+static int __tpm_crb_ffa_send_receive(unsigned long func_id, unsigned long a0,
+ unsigned long a1, unsigned long a2)
+{
+ ktime_t start, stop;
+ int ret;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(busy_timeout_ms));
+
+ for (;;) {
+ ret = __tpm_crb_ffa_try_send_receive(func_id, a0, a1, a2);
+ if (ret != -EBUSY)
+ break;
+
+ usleep_range(50, 100);
+ if (ktime_after(ktime_get(), stop)) {
+ dev_warn(&tpm_crb_ffa->ffa_dev->dev,
+ "Busy retry timed out\n");
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * tpm_crb_ffa_get_interface_version() - gets the ABI version of the TPM service
+ * @major: Pointer to caller-allocated buffer to hold the major version
+ * number of the ABI
+ * @minor: Pointer to caller-allocated buffer to hold the minor version
+ * number of the ABI
+ *
+ * Returns the major and minor version of the ABI of the FF-A based TPM.
+ * Allows the caller to evaluate its compatibility with the version of
+ * the ABI.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+{
+ int rc;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+ if (!rc) {
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ } else {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
+ * @request_type: Identifies whether the change to the CRB is in the command
+ * fields or locality fields.
+ * @locality: Specifies the locality number.
+ *
+ * Used by the CRB driver to notify the TPM service that a TPM command
+ * or a TPM locality request is ready to be processed at the given
+ * locality, once the corresponding CRB fields have been written.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_start(int request_type, int locality)
+{
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00);
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_start);
+
+static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
+{
+ struct tpm_crb_ffa *p;
+ int rc;
+
+ /* only one instance of a TPM partition is supported */
+ if (tpm_crb_ffa && !IS_ERR_VALUE(tpm_crb_ffa))
+ return -EEXIST;
+
+	tpm_crb_ffa = ERR_PTR(-ENODEV); /* set so probe failure can be detected */
+
+ if (!ffa_partition_supports_direct_recv(ffa_dev) &&
+ !ffa_partition_supports_direct_req2_recv(ffa_dev)) {
+ dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n");
+ return -EINVAL;
+ }
+
+ p = kzalloc(sizeof(*tpm_crb_ffa), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ tpm_crb_ffa = p;
+
+ mutex_init(&tpm_crb_ffa->msg_data_lock);
+ tpm_crb_ffa->ffa_dev = ffa_dev;
+ ffa_dev_set_drvdata(ffa_dev, tpm_crb_ffa);
+
+ /* if TPM is aarch32 use 32-bit SMCs */
+ if (!ffa_partition_check_property(ffa_dev, FFA_PARTITION_AARCH64_EXEC))
+ ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev);
+
+ /* verify compatibility of TPM service version number */
+ rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
+ &tpm_crb_ffa->minor_version);
+ if (rc) {
+ dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc);
+ goto out;
+ }
+
+ dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version,
+ tpm_crb_ffa->minor_version);
+
+ if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
+ (tpm_crb_ffa->minor_version > 0 &&
+ tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
+ dev_warn(&ffa_dev->dev, "Incompatible ABI version\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return -EINVAL;
+}
+
+static void tpm_crb_ffa_remove(struct ffa_device *ffa_dev)
+{
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = NULL;
+}
+
+static const struct ffa_device_id tpm_crb_ffa_device_id[] = {
+ /* 17b862a4-1806-4faf-86b3-089a58353861 */
+ { UUID_INIT(0x17b862a4, 0x1806, 0x4faf,
+ 0x86, 0xb3, 0x08, 0x9a, 0x58, 0x35, 0x38, 0x61) },
+ {}
+};
+
+static struct ffa_driver tpm_crb_ffa_driver = {
+ .name = "ffa-crb",
+ .probe = tpm_crb_ffa_probe,
+ .remove = tpm_crb_ffa_remove,
+ .id_table = tpm_crb_ffa_device_id,
+};
+
+#ifdef MODULE
+module_ffa_driver(tpm_crb_ffa_driver);
+#endif
+
+MODULE_AUTHOR("Arm");
+MODULE_DESCRIPTION("TPM CRB FFA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h
new file mode 100644
index 000000000000..d7e1344ea003
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+#ifndef _TPM_CRB_FFA_H
+#define _TPM_CRB_FFA_H
+
+#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA)
+int tpm_crb_ffa_init(void);
+int tpm_crb_ffa_start(int request_type, int locality);
+#else
+static inline int tpm_crb_ffa_init(void) { return 0; }
+static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; }
+#endif
+
+#define CRB_FFA_START_TYPE_COMMAND 0
+#define CRB_FFA_START_TYPE_LOCALITY_REQUEST 1
+
+#endif
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2ea4882251cf..4e63c30aeaf1 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -31,45 +31,19 @@ static const uuid_t ftpm_ta_uuid =
0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
/**
- * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
- * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @buf: the buffer to store data.
- * @count: the number of bytes to read.
- *
- * Return:
- * In case of success the number of bytes received.
- * On failure, -errno.
- */
-static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
-{
- struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
- size_t len;
-
- len = pvt_data->resp_len;
- if (count < len) {
- dev_err(&chip->dev,
- "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
- __func__, count, len);
- return -EIO;
- }
-
- memcpy(buf, pvt_data->resp_buf, len);
- pvt_data->resp_len = 0;
-
- return len;
-}
-
-/**
- * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory
+ * and retrieve the response.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @buf: the buffer to send.
- * @len: the number of bytes to send.
+ * @buf: the buffer to send and to store the response.
+ * @bufsiz: the size of the buffer.
+ * @cmd_len: the number of bytes to send.
*
* Return:
- * In case of success, returns 0.
+ * In case of success, returns the number of bytes received.
* On failure, -errno
*/
-static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
size_t resp_len;
@@ -80,16 +54,15 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct tee_param command_params[4];
struct tee_shm *shm = pvt_data->shm;
- if (len > MAX_COMMAND_SIZE) {
+ if (cmd_len > MAX_COMMAND_SIZE) {
dev_err(&chip->dev,
"%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
- __func__, len);
+ __func__, cmd_len);
return -EIO;
}
memset(&transceive_args, 0, sizeof(transceive_args));
memset(command_params, 0, sizeof(command_params));
- pvt_data->resp_len = 0;
/* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
transceive_args = (struct tee_ioctl_invoke_arg) {
@@ -103,7 +76,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
.u.memref = {
.shm = shm,
- .size = len,
+ .size = cmd_len,
.shm_offs = 0,
},
};
@@ -115,7 +88,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
return PTR_ERR(temp_buf);
}
memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
- memcpy(temp_buf, buf, len);
+ memcpy(temp_buf, buf, cmd_len);
command_params[1] = (struct tee_param) {
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
@@ -156,38 +129,21 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
__func__, resp_len);
return -EIO;
}
+ if (resp_len > bufsiz) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds bufsiz=%zd\n",
+ __func__, resp_len, bufsiz);
+ return -EIO;
+ }
- /* sanity checks look good, cache the response */
- memcpy(pvt_data->resp_buf, temp_buf, resp_len);
- pvt_data->resp_len = resp_len;
-
- return 0;
-}
-
-static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
-{
- /* not supported */
-}
-
-static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
-{
- return 0;
-}
+ memcpy(buf, temp_buf, resp_len);
-static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
-{
- return false;
+ return resp_len;
}
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
- .recv = ftpm_tee_tpm_op_recv,
.send = ftpm_tee_tpm_op_send,
- .cancel = ftpm_tee_tpm_op_cancel,
- .status = ftpm_tee_tpm_op_status,
- .req_complete_mask = 0,
- .req_complete_val = 0,
- .req_canceled = ftpm_tee_tpm_req_canceled,
};
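
With TPM_CHIP_FLAG_SYNC set in the probe below, the core issues a single
synchronous .send per command and expects the response in the same
buffer, with the return value giving the response length; that is what
lets the recv/cancel/status/req_canceled stubs above be dropped. The
resulting op shape (sketch):

    /* consumes cmd_len bytes from buf, writes the response back into
     * buf (bounded by bufsiz) and returns its length */
    static int sync_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
                         size_t cmd_len);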
/*
@@ -271,7 +227,7 @@ static int ftpm_tee_probe(struct device *dev)
}
pvt_data->chip = chip;
- pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_SYNC;
/* Create a character device for the fTPM */
rc = tpm_chip_register(pvt_data->chip);
@@ -362,11 +318,11 @@ MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
static struct platform_driver ftpm_tee_plat_driver = {
.driver = {
.name = "ftpm-tee",
- .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ .of_match_table = of_ftpm_tee_ids,
},
.shutdown = ftpm_plat_tee_shutdown,
.probe = ftpm_plat_tee_probe,
- .remove_new = ftpm_plat_tee_remove,
+ .remove = ftpm_plat_tee_remove,
};
/* UUID of the fTPM TA */
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
index f98daa7bf68c..8d5c3f0d2879 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.h
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -21,18 +21,13 @@
/**
* struct ftpm_tee_private - fTPM's private data
* @chip: struct tpm_chip instance registered with tpm framework.
- * @state: internal state
* @session: fTPM TA session identifier.
- * @resp_len: cached response buffer length.
- * @resp_buf: cached response buffer.
* @ctx: TEE context handler.
* @shm: Memory pool shared with fTPM TA in TEE.
*/
struct ftpm_tee_private {
struct tpm_chip *chip;
u32 session;
- size_t resp_len;
- u8 resp_buf[MAX_RESPONSE_SIZE];
struct tee_context *ctx;
struct tee_shm *shm;
};
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 301a95b3734f..4f229656a8e2 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -37,7 +37,8 @@ struct priv_data {
u8 buffer[sizeof(struct tpm_header) + 25];
};
-static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
@@ -186,7 +187,7 @@ static void i2c_atmel_remove(struct i2c_client *client)
}
static const struct i2c_device_id i2c_atmel_id[] = {
- {I2C_DRIVER_NAME, 0},
+ { I2C_DRIVER_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 81d8a78dc655..bdf1f329a679 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -514,7 +514,8 @@ out:
return size;
}
-static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, status;
ssize_t burstcnt;
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 3c3ee5f551db..d44903b29929 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -350,7 +350,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct device *dev = chip->dev.parent;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index d3989b257f42..4734a69406ce 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -191,13 +191,15 @@ static int tpm_ibmvtpm_resume(struct device *dev)
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer contains data to send
- * @count: size of buffer
+ * @bufsiz: size of the buffer
+ * @count: length of the command
*
* Return:
* 0 on success,
* -errno on error
*/
-static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
bool retry = true;
@@ -450,6 +452,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops tpm_ibmvtpm = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.recv = tpm_ibmvtpm_recv,
.send = tpm_ibmvtpm_send,
.cancel = tpm_ibmvtpm_cancel,
@@ -690,16 +693,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (!strcmp(id->compat, "IBM,vtpm20"))
chip->flags |= TPM_CHIP_FLAG_TPM2;
- rc = tpm_get_timeouts(chip);
- if (rc)
- goto init_irq_cleanup;
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = tpm2_get_cc_attrs_tbl(chip);
- if (rc)
- goto init_irq_cleanup;
- }
-
return tpm_chip_register(chip);
init_irq_cleanup:
do {
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 2d2ae37153ba..7638b65b851b 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -312,7 +312,8 @@ recv_begin:
return -EIO;
}
-static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
int i;
int ret;
diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c
new file mode 100644
index 000000000000..9e50250763d1
--- /dev/null
+++ b/drivers/char/tpm/tpm_loongson.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/device.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "tpm.h"
+
+struct tpm_loongson_cmd {
+ u32 cmd_id;
+ u32 data_off;
+ u32 data_len;
+ u32 pad[5];
+};
+
+static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret;
+
+ if (cmd_ret->data_len > count)
+ return -EIO;
+
+ memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len);
+
+ return cmd_ret->data_len;
+}
+
+static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd = tpm_engine->command;
+
+ if (count > tpm_engine->buffer_size)
+ return -E2BIG;
+
+ cmd->data_len = count;
+ memcpy(tpm_engine->data_buffer, buf, count);
+
+ return loongson_se_send_engine_cmd(tpm_engine);
+}
+
+static const struct tpm_class_ops tpm_loongson_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_loongson_recv,
+ .send = tpm_loongson_send,
+};
+
+static int tpm_loongson_probe(struct platform_device *pdev)
+{
+ struct loongson_se_engine *tpm_engine;
+ struct device *dev = &pdev->dev;
+ struct tpm_loongson_cmd *cmd;
+ struct tpm_chip *chip;
+
+ tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM);
+ if (!tpm_engine)
+ return -ENODEV;
+ cmd = tpm_engine->command;
+ cmd->cmd_id = SE_CMD_TPM;
+ cmd->data_off = tpm_engine->buffer_off;
+
+ chip = tpmm_chip_alloc(dev, &tpm_loongson_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ;
+ dev_set_drvdata(&chip->dev, tpm_engine);
+
+ return tpm_chip_register(chip);
+}
+
+static struct platform_driver tpm_loongson = {
+ .probe = tpm_loongson_probe,
+ .driver = {
+ .name = "tpm_loongson",
+ },
+};
+module_platform_driver(tpm_loongson);
+
+MODULE_ALIAS("platform:tpm_loongson");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Loongson TPM driver");
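The new Loongson driver funnels both directions through the Security Engine's shared buffer: send() copies the command into tpm_engine->data_buffer and kicks the engine, recv() copies at most count bytes of the response back out. A hedged sketch of one round trip as the TPM core would drive it (the encoder helper is hypothetical):

u8 cmd[256];	/* assumption: large enough for this command */
size_t cmdlen = build_tpm2_getrandom(cmd);	/* hypothetical encoder */

if (tpm_loongson_send(chip, cmd, sizeof(cmd), cmdlen) == 0)
	tpm_loongson_recv(chip, cmd, sizeof(cmd));	/* response lands in cmd */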
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 0f62bbc940da..879ac88f5783 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -178,7 +178,8 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return size;
}
-static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
u8 data;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index bc7b1b4501b3..c9793a3d986d 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+static const char * const tpm_ppi_info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by system firmware",
+ "User required",
+ "User not required",
+};
+
+/* A mutex to protect access to the cache from concurrent reads */
+static DEFINE_MUTEX(tpm_ppi_lock);
+
+static u32 ppi_operations_cache[PPI_VS_REQ_END + 1];
+static bool ppi_cache_populated;
+
static bool tpm_ppi_req_has_parameter(u64 req)
{
return req == 23;
@@ -52,7 +66,7 @@ static ssize_t tpm_show_ppi_version(struct device *dev,
{
struct tpm_chip *chip = to_tpm_chip(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+ return sysfs_emit(buf, "%s\n", chip->ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
@@ -87,12 +101,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
else {
req = obj->package.elements[1].integer.value;
if (tpm_ppi_req_has_parameter(req))
- size = scnprintf(buf, PAGE_SIZE,
- "%llu %llu\n", req,
- obj->package.elements[2].integer.value);
+ size = sysfs_emit(buf, "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
else
- size = scnprintf(buf, PAGE_SIZE,
- "%llu\n", req);
+ size = sysfs_emit(buf, "%llu\n", req);
}
} else if (obj->package.count == 2 &&
obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
@@ -100,8 +112,8 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
if (obj->package.elements[0].integer.value)
size = -EFAULT;
else
- size = scnprintf(buf, PAGE_SIZE, "%llu\n",
- obj->package.elements[1].integer.value);
+ size = sysfs_emit(buf, "%llu\n",
+ obj->package.elements[1].integer.value);
}
ACPI_FREE(obj);
@@ -211,10 +223,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
}
if (ret < ARRAY_SIZE(info) - 1)
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+ status = sysfs_emit(buf, "%d: %s\n", ret, info[ret]);
else
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
- info[ARRAY_SIZE(info)-1]);
+ status = sysfs_emit(buf, "%d: %s\n", ret,
+ info[ARRAY_SIZE(info) - 1]);
return status;
}
@@ -255,23 +267,23 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
res = ret_obj[2].integer.value;
if (req) {
if (res == 0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0: Success");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0: Success");
else if (res == 0xFFFFFFF0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF0: User Abort");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF0: User Abort");
else if (res == 0xFFFFFFF1)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF1: BIOS Failure");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF1: BIOS Failure");
else if (res >= 1 && res <= 0x00000FFF)
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Corresponding TPM error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Corresponding TPM error");
else
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Error");
} else {
- status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
- req, "No Recent Request");
+ status = sysfs_emit(buf, "%llu: %s\n",
+ req, "No Recent Request");
}
cleanup:
@@ -279,46 +291,33 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
- u32 end)
+static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf)
{
int i;
u32 ret;
- char *str = buf;
+ int len = 0;
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
- static char *info[] = {
- "Not implemented",
- "BIOS only",
- "Blocked for OS by BIOS",
- "User required",
- "User not required",
- };
-
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
- for (i = start; i <= end; i++) {
+ for (i = 0; i <= PPI_VS_REQ_END; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
ACPI_TYPE_INTEGER, &argv,
TPM_PPI_REVISION_ID_1);
- if (!obj) {
+ if (!obj)
return -ENOMEM;
- } else {
- ret = obj->integer.value;
- ACPI_FREE(obj);
- }
- if (ret > 0 && ret < ARRAY_SIZE(info))
- str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
- i, ret, info[ret]);
+ ret = obj->integer.value;
+ ppi_operations_cache[i] = ret;
+ ACPI_FREE(obj);
}
- return str - buf;
+ return len;
}
static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
@@ -326,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
- return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
- PPI_TPM_REQ_MAX);
+ for (i = 0; i <= PPI_TPM_REQ_MAX; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
@@ -336,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
+
+ for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
- return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
- PPI_VS_REQ_END);
+ return len;
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
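Both show routines above use the same populate-once idiom: the first reader fills the cache under the mutex, later readers only copy out of it. The pattern in isolation, with hypothetical names:

static DEFINE_MUTEX(cache_lock);
static u32 cache[16];
static bool cache_populated;

static ssize_t show_cached(char *buf)
{
	ssize_t len = 0;
	int i;

	mutex_lock(&cache_lock);
	if (!cache_populated) {
		if (fill_cache(cache) < 0) {	/* hypothetical helper */
			mutex_unlock(&cache_lock);
			return -EIO;
		}
		cache_populated = true;
	}
	for (i = 0; i < ARRAY_SIZE(cache); i++)
		len += sysfs_emit_at(buf, len, "%u\n", cache[i]);
	mutex_unlock(&cache_lock);

	return len;
}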
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
new file mode 100644
index 000000000000..f5ba0f64850b
--- /dev/null
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Driver for the vTPM defined by the AMD SVSM spec [1].
+ *
+ * The specification defines a protocol that a SEV-SNP guest OS can use to
+ * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM)
+ * in the guest context, but at a more privileged level (usually VMPL0).
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/tpm_svsm.h>
+
+#include <asm/sev.h>
+
+#include "tpm.h"
+
+struct tpm_svsm_priv {
+ void *buffer;
+};
+
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ int ret;
+
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, cmd_len);
+ if (ret)
+ return ret;
+
+ /*
+ * The SVSM call uses the same buffer for the command and for the
+ * response, so after this call, the buffer will contain the response.
+ *
+ * Note: we have to use an internal buffer because the device in SVSM
+ * expects the svsm_vtpm header + data to be physically contiguous.
+ */
+ ret = snp_svsm_vtpm_send_command(priv->buffer);
+ if (ret)
+ return ret;
+
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, bufsiz);
+}
+
+static struct tpm_class_ops tpm_chip_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .send = tpm_svsm_send,
+};
+
+static int __init tpm_svsm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tpm_svsm_priv *priv;
+ struct tpm_chip *chip;
+ int err;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER
+ * in tpm_svsm.h).
+ */
+ priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!priv->buffer)
+ return -ENOMEM;
+
+ chip = tpmm_chip_alloc(dev, &tpm_chip_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ chip->flags |= TPM_CHIP_FLAG_SYNC;
+ err = tpm2_probe(chip);
+ if (err)
+ return err;
+
+ err = tpm_chip_register(chip);
+ if (err)
+ return err;
+
+ dev_info(dev, "SNP SVSM vTPM %s device\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2");
+
+ return 0;
+}
+
+static void __exit tpm_svsm_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = platform_get_drvdata(pdev);
+
+ tpm_chip_unregister(chip);
+}
+
+/*
+ * tpm_svsm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound
+ * at runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver tpm_svsm_driver __refdata = {
+ .remove = __exit_p(tpm_svsm_remove),
+ .driver = {
+ .name = "tpm-svsm",
+ },
+};
+
+module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe);
+
+MODULE_DESCRIPTION("SNP SVSM vTPM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tpm-svsm");
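module_platform_driver_probe() is what lets tpm_svsm_probe() live in init memory: the device can never be re-bound after boot. Roughly, the macro expands to the following open-coded registration (sketch, error handling elided):

static int __init tpm_svsm_init(void)
{
	return platform_driver_probe(&tpm_svsm_driver, tpm_svsm_probe);
}
module_init(tpm_svsm_init);

static void __exit tpm_svsm_exit(void)
{
	platform_driver_unregister(&tpm_svsm_driver);
}
module_exit(tpm_svsm_exit);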
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 2f7326d297ad..9aa230a63616 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -356,7 +356,7 @@ MODULE_DEVICE_TABLE(of, tis_of_platform_match);
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
- .remove_new = tpm_tis_plat_remove,
+ .remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 176cd8dbf1db..e2a1769081b1 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -114,11 +114,10 @@ again:
return 0;
/* process status changes without irq support */
do {
+ usleep_range(priv->timeout_min, priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
- usleep_range(priv->timeout_min,
- priv->timeout_max);
} while (time_before(jiffies, stop));
return -ETIME;
}
@@ -266,8 +265,7 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
/*
* Dump stack for forensics, as invalid TPM_STS.x could be
- * potentially triggered by impaired tpm_try_get_ops() or
- * tpm_find_get_ops().
+ * potentially triggered by impaired tpm_try_get_ops().
*/
dump_stack();
}
@@ -464,7 +462,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -481,7 +482,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -546,9 +550,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc >= 0)
/* Data transfer done successfully */
break;
- else if (rc != -EIO)
+ else if (rc != -EAGAIN && rc != -EIO)
/* Data transfer failed, not recoverable */
return rc;
+
+ usleep_range(priv->timeout_min, priv->timeout_max);
}
/* go and do it */
@@ -573,7 +579,8 @@ out_err:
return rc;
}
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, irq;
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
@@ -970,8 +977,8 @@ restore_irqs:
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
rc = -1;
}
@@ -1020,7 +1027,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
- flush_work(&priv->free_irq_work);
+ if (priv->free_irq_work.func)
+ flush_work(&priv->free_irq_work);
tpm_tis_clkrun_enable(chip, false);
@@ -1143,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
+ if (priv->manufacturer_id == TPM_VID_IFX)
+ set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
+
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);
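With TPM_TIS_STATUS_VALID_RETRY set (Infineon parts, per the hunk above), a failed wait for TPM_STS_VALID now yields -EAGAIN, which the send path treats as retryable alongside -EIO, sleeping between attempts. A condensed sketch of the resulting loop shape (declarations elided, names as in the driver):

/* Condensed shape of the send retry loop after this change (sketch) */
for (try = 0; try < TPM_RETRY; try++) {
	rc = tpm_tis_send_data(chip, buf, len);
	if (rc >= 0)
		break;				/* data transferred */
	if (rc != -EAGAIN && rc != -EIO)
		return rc;			/* not recoverable */
	usleep_range(priv->timeout_min, priv->timeout_max);
}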
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 13e99cf65efe..6c3aa480396b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,7 +54,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
- TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
@@ -89,6 +89,7 @@ enum tpm_tis_flags {
TPM_TIS_INVALID_STATUS = 1,
TPM_TIS_DEFAULT_CANCELLATION = 2,
TPM_TIS_IRQ_TESTED = 3,
+ TPM_TIS_STATUS_VALID_RETRY = 4,
};
struct tpm_tis_data {
@@ -210,7 +211,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+ return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0;
#else
return false;
#endif
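The is_bsw() change moves from the old family-6 model compare to boot_cpu_data.x86_vfm, which encodes vendor, family and model in a single field, so one comparison carries the whole identity check. In isolation (sketch):

#include <asm/processor.h>
#include <asm/intel-family.h>	/* INTEL_ATOM_AIRMONT is a VFM constant */

static bool cpu_is_airmont(void)
{
	/* matches vendor + family + model in one comparison */
	return boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT;
}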
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index 9511c0d50185..6cd07dd34507 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -375,7 +375,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tpm_tis_i2c_id[] = {
- { "tpm_tis_i2c", 0 },
+ { "tpm_tis_i2c" },
{}
};
MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index adf22992138e..fc6891a0b693 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -17,6 +17,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -30,11 +31,13 @@
#define TPM_CR50_MAX_BUFSIZE 64
#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
-#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
-#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID for Cr50 H1 */
+#define TPM_TI50_DT_I2C_DID_VID 0x504a6666L /* Device and vendor ID for Ti50 DT */
+#define TPM_TI50_OT_I2C_DID_VID 0x50666666L /* Device and vendor ID for Ti50 OT */
#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+#define TPM_CR50_I2C_DEFAULT_LOC 0
#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
@@ -199,8 +202,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t
};
int rc;
- i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
-
/* Prepare for completion interrupt */
tpm_cr50_i2c_enable_tpm_irq(chip);
@@ -219,7 +220,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t
out:
tpm_cr50_i2c_disable_tpm_irq(chip);
- i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (rc < 0)
return rc;
@@ -261,8 +261,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
priv->buf[0] = addr;
memcpy(priv->buf + 1, buffer, len);
- i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
-
/* Prepare for completion interrupt */
tpm_cr50_i2c_enable_tpm_irq(chip);
@@ -276,7 +274,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
out:
tpm_cr50_i2c_disable_tpm_irq(chip);
- i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (rc < 0)
return rc;
@@ -285,25 +282,26 @@ out:
}
/**
- * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * tpm_cr50_check_locality() - Verify if required TPM locality is active.
* @chip: A TPM chip.
+ * @loc: Locality to be verified
*
* Return:
- * - 0: Success.
+ * - loc: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_check_locality(struct tpm_chip *chip)
+static int tpm_cr50_check_locality(struct tpm_chip *chip, int loc)
{
u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
u8 buf;
int rc;
- rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf));
if (rc < 0)
return rc;
if ((buf & mask) == mask)
- return 0;
+ return loc;
return -EIO;
}
@@ -311,53 +309,72 @@ static int tpm_cr50_check_locality(struct tpm_chip *chip)
/**
* tpm_cr50_release_locality() - Release TPM locality.
* @chip: A TPM chip.
- * @force: Flag to force release if set.
+ * @loc: Locality to be released
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
*/
-static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+static int tpm_cr50_release_locality(struct tpm_chip *chip, int loc)
{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
- u8 addr = TPM_I2C_ACCESS(0);
+ u8 addr = TPM_I2C_ACCESS(loc);
u8 buf;
+ int rc;
- if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
- return;
+ rc = tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf));
+ if (rc < 0)
+ goto unlock_out;
- if (force || (buf & mask) == mask) {
+ if ((buf & mask) == mask) {
buf = TPM_ACCESS_ACTIVE_LOCALITY;
- tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ rc = tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
}
+
+unlock_out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return rc;
}
/**
- * tpm_cr50_request_locality() - Request TPM locality 0.
+ * tpm_cr50_request_locality() - Request TPM locality.
* @chip: A TPM chip.
+ * @loc: Locality to be requested.
*
* Return:
- * - 0: Success.
+ * - loc: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_request_locality(struct tpm_chip *chip)
+static int tpm_cr50_request_locality(struct tpm_chip *chip, int loc)
{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
u8 buf = TPM_ACCESS_REQUEST_USE;
unsigned long stop;
int rc;
- if (!tpm_cr50_check_locality(chip))
- return 0;
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (tpm_cr50_check_locality(chip, loc) == loc)
+ return loc;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf));
if (rc < 0)
- return rc;
+ goto unlock_out;
stop = jiffies + chip->timeout_a;
do {
- if (!tpm_cr50_check_locality(chip))
- return 0;
+ if (tpm_cr50_check_locality(chip, loc) == loc)
+ return loc;
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
} while (time_before(jiffies, stop));
- return -ETIMEDOUT;
+ rc = -ETIMEDOUT;
+
+unlock_out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return rc;
}
/**
@@ -373,7 +390,7 @@ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
{
u8 buf[4];
- if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0)
return 0;
return buf[0];
@@ -389,7 +406,7 @@ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
{
u8 buf[4] = { TPM_STS_COMMAND_READY };
- tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf));
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
}
@@ -419,7 +436,7 @@ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
stop = jiffies + chip->timeout_b;
do {
- if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) {
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
continue;
}
@@ -453,7 +470,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
size_t burstcnt, cur, len, expected;
- u8 addr = TPM_I2C_DATA_FIFO(0);
+ u8 addr = TPM_I2C_DATA_FIFO(chip->locality);
u32 status;
int rc;
@@ -515,7 +532,6 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
goto out_err;
}
- tpm_cr50_release_locality(chip, false);
return cur;
out_err:
@@ -523,7 +539,6 @@ out_err:
if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
tpm_cr50_i2c_tis_set_ready(chip);
- tpm_cr50_release_locality(chip, false);
return rc;
}
@@ -531,13 +546,15 @@ out_err:
* tpm_cr50_i2c_tis_send() - TPM transmission callback.
* @chip: A TPM chip.
* @buf: Buffer to send.
- * @len: Buffer length.
+ * @bufsiz: Buffer size.
+ * @len: Command length.
*
* Return:
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
size_t burstcnt, limit, sent = 0;
u8 tpm_go[4] = { TPM_STS_GO };
@@ -545,10 +562,6 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
u32 status;
int rc;
- rc = tpm_cr50_request_locality(chip);
- if (rc < 0)
- return rc;
-
/* Wait until TPM is ready for a command */
stop = jiffies + chip->timeout_b;
while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
@@ -577,7 +590,8 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
* that is inserted by tpm_cr50_i2c_write()
*/
limit = min_t(size_t, burstcnt - 1, len);
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(chip->locality),
+ &buf[sent], limit);
if (rc < 0) {
dev_err(&chip->dev, "Write failed\n");
goto out_err;
@@ -598,7 +612,7 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
}
/* Start the TPM command */
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), tpm_go,
sizeof(tpm_go));
if (rc < 0) {
dev_err(&chip->dev, "Start command failed\n");
@@ -611,7 +625,6 @@ out_err:
if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
tpm_cr50_i2c_tis_set_ready(chip);
- tpm_cr50_release_locality(chip, false);
return rc;
}
@@ -650,6 +663,8 @@ static const struct tpm_class_ops cr50_i2c = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = &tpm_cr50_i2c_req_canceled,
+ .request_locality = &tpm_cr50_request_locality,
+ .relinquish_locality = &tpm_cr50_release_locality,
};
#ifdef CONFIG_ACPI
@@ -669,6 +684,27 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
#endif
/**
+ * tpm_cr50_vid_to_name() - Maps VID to name.
+ * @vendor: Vendor identifier to map to name
+ *
+ * Return:
+ * A valid string naming the vendor, or "unknown" if not recognized
+ */
+static const char *tpm_cr50_vid_to_name(u32 vendor)
+{
+ switch (vendor) {
+ case TPM_CR50_I2C_DID_VID:
+ return "cr50";
+ case TPM_TI50_DT_I2C_DID_VID:
+ return "ti50 DT";
+ case TPM_TI50_OT_I2C_DID_VID:
+ return "ti50 OT";
+ default:
+ return "unknown";
+ }
+}
+
+/**
* tpm_cr50_i2c_probe() - Driver probe function.
* @client: I2C client information.
*
@@ -684,6 +720,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
u32 vendor;
u8 buf[4];
int rc;
+ int loc;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -726,29 +763,37 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
TPM_CR50_TIMEOUT_NOIRQ_MS);
}
- rc = tpm_cr50_request_locality(chip);
- if (rc < 0) {
+ loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC);
+ if (loc < 0) {
dev_err(dev, "Could not request locality\n");
- return rc;
+ return loc;
}
/* Read four bytes from DID_VID register */
- rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(loc), buf, sizeof(buf));
if (rc < 0) {
dev_err(dev, "Could not read vendor id\n");
- tpm_cr50_release_locality(chip, true);
+ if (tpm_cr50_release_locality(chip, loc))
+ dev_err(dev, "Could not release locality\n");
+ return rc;
+ }
+
+ rc = tpm_cr50_release_locality(chip, loc);
+ if (rc) {
+ dev_err(dev, "Could not release locality\n");
return rc;
}
vendor = le32_to_cpup((__le32 *)buf);
- if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) {
+ if (vendor != TPM_CR50_I2C_DID_VID &&
+ vendor != TPM_TI50_DT_I2C_DID_VID &&
+ vendor != TPM_TI50_OT_I2C_DID_VID) {
dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
- tpm_cr50_release_locality(chip, true);
return -ENODEV;
}
dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
- vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50",
+ tpm_cr50_vid_to_name(vendor),
client->addr, client->irq, vendor >> 16);
return tpm_chip_register(chip);
}
@@ -772,7 +817,6 @@ static void tpm_cr50_i2c_remove(struct i2c_client *client)
}
tpm_chip_unregister(chip);
- tpm_cr50_release_locality(chip, true);
}
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
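With the locality ops wired into cr50_i2c, the I2C segment lock taken in tpm_cr50_request_locality() is held until tpm_cr50_release_locality() drops it, so an entire command runs with the bus claimed. The bracket the core effectively performs around a transaction (simplified sketch):

static int example_locked_command(struct tpm_chip *chip)
{
	int loc;

	loc = chip->ops->request_locality(chip, 0); /* takes the bus lock */
	if (loc < 0)
		return loc;
	/* ->send() and ->recv() now run with the I2C segment locked */
	return chip->ops->relinquish_locality(chip, loc); /* drops the lock */
}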
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 3f9eaf27b41b..61b42c83ced8 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -37,6 +37,7 @@
#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
+#define SPI_HDRSIZE 4
/*
* TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
int irq, const struct tpm_tis_phy_ops *phy_ops)
{
- phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;
@@ -317,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
+ { "attpm20p", (unsigned long)tpm_tis_spi_probe },
{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
{ "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
index 0621ebec530b..4927714d277a 100644
--- a/drivers/char/tpm/tpm_tis_synquacer.c
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
static struct platform_driver tis_synquacer_drv = {
.probe = tpm_tis_synquacer_probe,
- .remove_new = tpm_tis_synquacer_remove,
+ .remove = tpm_tis_synquacer_remove,
.driver = {
.name = "tpm_tis_synquacer",
.pm = &tpm_tis_synquacer_pm,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 11c502039faf..0818bb517805 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -243,7 +243,6 @@ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
static const struct file_operations vtpm_proxy_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = vtpm_proxy_fops_read,
.write = vtpm_proxy_fops_write,
.poll = vtpm_proxy_fops_poll,
@@ -322,12 +321,14 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
*
* @chip: tpm chip to use
* @buf: send buffer
+ * @bufsiz: size of the buffer
* @count: bytes to send
*
* Return:
* 0 in case of success, negative error value otherwise.
*/
-static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index eef0fb06ea83..c25df7ea064e 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -46,7 +46,6 @@ static int tpmrm_release(struct inode *inode, struct file *file)
const struct file_operations tpmrm_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = tpmrm_open,
.read = tpm_common_read,
.write = tpm_common_write,
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 80cca3b83b22..556bf2256716 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -131,7 +131,8 @@ static size_t shr_data_offset(struct vtpm_shared_page *shr)
return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
-static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4c806a189ee5..d7f841ab4323 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -228,4 +228,5 @@ static void __exit ttyprintk_exit(void)
device_initcall(ttyprintk_init);
module_exit(ttyprintk_exit);
+MODULE_DESCRIPTION("TTY driver to output user messages via printk");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 035f89f1a251..088182e54deb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -26,6 +26,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/string_choices.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
@@ -883,9 +884,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- src = kmap_atomic(buf->page);
+ src = kmap_local_page(buf->page);
memcpy(page_address(page) + offset, src + buf->offset, len);
- kunmap_atomic(src);
+ kunmap_local(src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
}
@@ -923,14 +924,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
@@ -1093,7 +1094,6 @@ static const struct file_operations port_fops = {
.poll = port_fops_poll,
.release = port_fops_release,
.fasync = port_fops_fasync,
- .llseek = no_llseek,
};
/*
@@ -1270,8 +1270,7 @@ static int port_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
- seq_printf(s, "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "is_console: %s\n", str_yes_no(is_console_port(port)));
seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
return 0;
@@ -1322,7 +1321,6 @@ static void send_sigio_to_port(struct port *port)
static int add_port(struct ports_device *portdev, u32 id)
{
- char debugfs_name[16];
struct port *port;
dev_t devt;
int err;
@@ -1425,9 +1423,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* Finally, create the debugfs file that we can use to
* inspect a port's state at any time
*/
- snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
- port->portdev->vdev->index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ port->debugfs_file = debugfs_create_file(dev_name(port->dev), 0444,
pdrvdata.debugfs_dir,
port, &port_debugfs_fops);
return 0;
@@ -1580,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 cols;
+ __virtio16 rows;
} size;
if (!is_console_port(port))
@@ -1589,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
@@ -1804,8 +1801,7 @@ static void config_work_handler(struct work_struct *work)
static int init_vqs(struct ports_device *portdev)
{
- vq_callback_t **io_callbacks;
- char **io_names;
+ struct virtqueue_info *vqs_info;
struct virtqueue **vqs;
u32 i, j, nr_ports, nr_queues;
int err;
@@ -1814,15 +1810,12 @@ static int init_vqs(struct ports_device *portdev)
nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
- io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),
- GFP_KERNEL);
- io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);
+ vqs_info = kcalloc(nr_queues, sizeof(*vqs_info), GFP_KERNEL);
portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
GFP_KERNEL);
portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
GFP_KERNEL);
- if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
- !portdev->out_vqs) {
+ if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) {
err = -ENOMEM;
goto free;
}
@@ -1833,30 +1826,27 @@ static int init_vqs(struct ports_device *portdev)
* 0 before others.
*/
j = 0;
- io_callbacks[j] = in_intr;
- io_callbacks[j + 1] = out_intr;
- io_names[j] = "input";
- io_names[j + 1] = "output";
+ vqs_info[j].callback = in_intr;
+ vqs_info[j + 1].callback = out_intr;
+ vqs_info[j].name = "input";
+ vqs_info[j + 1].name = "output";
j += 2;
if (use_multiport(portdev)) {
- io_callbacks[j] = control_intr;
- io_callbacks[j + 1] = NULL;
- io_names[j] = "control-i";
- io_names[j + 1] = "control-o";
+ vqs_info[j].callback = control_intr;
+ vqs_info[j].name = "control-i";
+ vqs_info[j + 1].name = "control-o";
for (i = 1; i < nr_ports; i++) {
j += 2;
- io_callbacks[j] = in_intr;
- io_callbacks[j + 1] = out_intr;
- io_names[j] = "input";
- io_names[j + 1] = "output";
+ vqs_info[j].callback = in_intr;
+ vqs_info[j + 1].callback = out_intr;
+ vqs_info[j].name = "input";
+ vqs_info[j + 1].name = "output";
}
}
/* Find the queues. */
- err = virtio_find_vqs(portdev->vdev, nr_queues, vqs,
- io_callbacks,
- (const char **)io_names, NULL);
+ err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, vqs_info, NULL);
if (err)
goto free;
@@ -1874,8 +1864,7 @@ static int init_vqs(struct ports_device *portdev)
portdev->out_vqs[i] = vqs[j + 1];
}
}
- kfree(io_names);
- kfree(io_callbacks);
+ kfree(vqs_info);
kfree(vqs);
return 0;
@@ -1883,8 +1872,7 @@ static int init_vqs(struct ports_device *portdev)
free:
kfree(portdev->out_vqs);
kfree(portdev->in_vqs);
- kfree(io_names);
- kfree(io_callbacks);
+ kfree(vqs_info);
kfree(vqs);
return err;
@@ -2016,25 +2004,27 @@ static int virtcons_probe(struct virtio_device *vdev)
multiport = true;
}
- err = init_vqs(portdev);
- if (err < 0) {
- dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
- goto free_chrdev;
- }
-
spin_lock_init(&portdev->ports_lock);
INIT_LIST_HEAD(&portdev->ports);
INIT_LIST_HEAD(&portdev->list);
- virtio_device_ready(portdev->vdev);
-
INIT_WORK(&portdev->config_work, &config_work_handler);
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
+ }
+ err = init_vqs(portdev);
+ if (err < 0) {
+ dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+ goto free_chrdev;
+ }
+
+ virtio_device_ready(portdev->vdev);
+
+ if (multiport) {
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
if (err < 0) {
dev_err(&vdev->dev,
@@ -2173,7 +2163,6 @@ static struct virtio_driver virtio_console = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtcons_probe,
.remove = virtcons_remove,
@@ -2188,7 +2177,6 @@ static struct virtio_driver virtio_rproc_serial = {
.feature_table = rproc_serial_features,
.feature_table_size = ARRAY_SIZE(rproc_serial_features),
.driver.name = "virtio_rproc_serial",
- .driver.owner = THIS_MODULE,
.id_table = rproc_serial_id_table,
.probe = virtcons_probe,
.remove = virtcons_remove,
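The init_vqs() rework collapses the two parallel arrays (callbacks and names) into one struct virtqueue_info array for the current virtio_find_vqs() API. The minimal shape of the new call, for a two-queue device (sketch):

struct virtqueue_info vqs_info[2] = {
	{ .name = "input",  .callback = in_intr  },
	{ .name = "output", .callback = out_intr },
};
struct virtqueue *vqs[2];
int err;

err = virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);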
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 4f6c3cb8aa41..34a345dc5e72 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,
- .remove_new = hwicap_drv_remove,
+ .remove = hwicap_drv_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = hwicap_of_match,
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 11b7c4749274..fc4e69b5cb6a 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1184,8 +1184,7 @@ static int xillybus_flush(struct file *filp, fl_owner_t id)
static void xillybus_autoflush(struct work_struct *work)
{
- struct delayed_work *workitem = container_of(
- work, struct delayed_work, work);
+ struct delayed_work *workitem = to_delayed_work(work);
struct xilly_channel *channel = container_of(
workitem, struct xilly_channel, rd_workitem);
int rc;
@@ -1974,7 +1973,7 @@ EXPORT_SYMBOL(xillybus_endpoint_remove);
static int __init xillybus_init(void)
{
- xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ xillybus_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0);
if (!xillybus_wq)
return -ENOMEM;
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 8802e2a6fd20..1a1e64133315 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -74,7 +74,7 @@ static void xilly_drv_remove(struct platform_device *op)
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
- .remove_new = xilly_drv_remove,
+ .remove = xilly_drv_remove,
.driver = {
.name = xillyname,
.of_match_table = xillybus_of_match,
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index 5a5afa14ca8c..386531474213 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillyusb";
static unsigned int fifo_buf_order;
+static struct workqueue_struct *wakeup_wq;
#define USB_VENDOR_ID_XILINX 0x03fd
#define USB_VENDOR_ID_ALTERA 0x09fb
@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
* errors if executed. The mechanism relies on xdev->error being assigned
* a non-zero value by report_io_error() prior to queueing wakeup_all(),
* which prevents bulk_in_work() from calling process_bulk_in().
- *
- * The fact that wakeup_all() and bulk_in_work() are queued on the same
- * workqueue makes their concurrent execution very unlikely, however the
- * kernel's API doesn't seem to ensure this strictly.
*/
static void wakeup_all(struct work_struct *work)
@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,
if (do_once) {
kref_get(&xdev->kref); /* xdev is used by work item */
- queue_work(xdev->workq, &xdev->wakeup_workitem);
+ queue_work(wakeup_wq, &xdev->wakeup_workitem);
}
}
@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {
static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
{
+ struct usb_device *udev = xdev->udev;
+
+ /* Verify that device has the two fundamental bulk in/out endpoints */
+ if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
+ usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
+ return -ENODEV;
+
xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
bulk_out_work, 1, 2);
if (!xdev->msg_ep)
@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
__le16 *chandesc,
int num_channels)
{
- struct xillyusb_channel *chan;
+ struct usb_device *udev = xdev->udev;
+ struct xillyusb_channel *chan, *new_channels;
int i;
chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
- xdev->channels = chan;
+ new_channels = chan;
for (i = 0; i < num_channels; i++, chan++) {
unsigned int in_desc = le16_to_cpu(*chandesc++);
@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
*/
if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+ if (usb_pipe_type_check(udev,
+ usb_sndbulkpipe(udev, i + 2))) {
+ dev_err(xdev->dev,
+ "Missing BULK OUT endpoint %d\n",
+ i + 2);
+ kfree(new_channels);
+ return -ENODEV;
+ }
+
chan->writable = 1;
chan->out_synchronous = !!(out_desc & 0x40);
chan->out_seekable = !!(out_desc & 0x20);
@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
}
}
+ xdev->channels = new_channels;
return 0;
}
@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
* just after responding with the IDT, there is no reason for any
* work item to be running now. To be sure that xdev->channels
* is updated on anything that might run in parallel, flush the
- * workqueue, which rarely does anything.
+ * device's workqueue and the wakeup work item. This rarely
+ * does anything.
*/
flush_workqueue(xdev->workq);
+ flush_work(&xdev->wakeup_workitem);
xdev->num_channels = num_channels;
@@ -2146,7 +2163,7 @@ static int xillyusb_probe(struct usb_interface *interface,
spin_lock_init(&xdev->error_lock);
xdev->in_counter = 0;
xdev->in_bytes_left = 0;
- xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
+ xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!xdev->workq) {
dev_err(&interface->dev, "Failed to allocate work queue\n");
@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
{
int rc = 0;
+ wakeup_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0);
+ if (!wakeup_wq)
+ return -ENOMEM;
+
if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
else
@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)
rc = usb_register(&xillyusb_driver);
+ if (rc)
+ destroy_workqueue(wakeup_wq);
+
return rc;
}
static void __exit xillyusb_exit(void)
{
usb_deregister(&xillyusb_driver);
+
+ destroy_workqueue(wakeup_wq);
}
module_init(xillyusb_init);
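Our reading of the xillyusb change: wakeup_all() may drop the last reference and tear down xdev->workq, and destroying a workqueue from a work item still running on it would deadlock, hence the module-global wakeup_wq created at init and destroyed on exit. The queueing side in isolation (sketch):

static void example_report_error(struct xillyusb_dev *xdev)
{
	kref_get(&xdev->kref);	/* reference owned by the work item */
	/* separate queue: the item may end up destroying xdev->workq */
	queue_work(wakeup_wq, &xdev->wakeup_workitem);
}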
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
index efa12ac2b3f2..08e26137f3d9 100644
--- a/drivers/clk/.kunitconfig
+++ b/drivers/clk/.kunitconfig
@@ -1,6 +1,9 @@
CONFIG_KUNIT=y
+CONFIG_OF=y
+CONFIG_OF_OVERLAY=y
CONFIG_COMMON_CLK=y
CONFIG_CLK_KUNIT_TEST=y
+CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y
CONFIG_CLK_GATE_KUNIT_TEST=y
CONFIG_CLK_FD_KUNIT_TEST=y
CONFIG_UML_PCI_OVER_VIRTIO=n
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3e9099504fad..3a1611008e48 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -61,7 +61,6 @@ config LMK04832
config COMMON_CLK_APPLE_NCO
tristate "Clock driver for Apple SoC NCOs"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
This driver supports NCO (Numerically Controlled Oscillator) blocks
found on Apple SoCs such as t8103 (M1). The blocks are typically
@@ -88,6 +87,15 @@ config COMMON_CLK_RK808
These multi-function devices have two fixed-rate oscillators, clocked at 32KHz each.
Clkout1 is always on; Clkout2 can be turned off via a control register.
+config COMMON_CLK_RP1
+ tristate "Raspberry Pi RP1-based clock support"
+ depends on MISC_RP1 || COMPILE_TEST
+ default MISC_RP1
+ help
+ Enable common clock framework support for Raspberry Pi RP1.
+ This multi-function device has 3 main PLLs and several clock
+ generators to drive the internal sub-peripherals.
+
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
@@ -218,6 +226,25 @@ config COMMON_CLK_EN7523
This driver provides the fixed clocks and gates present on Airoha
ARM silicon.
+config COMMON_CLK_EP93XX
+ tristate "Clock driver for Cirrus Logic ep93xx SoC"
+ depends on ARCH_EP93XX || COMPILE_TEST
+ select AUXILIARY_BUS
+ select REGMAP_MMIO
+ help
+ This driver supports the SoC clocks on the Cirrus Logic ep93xx.
+
+config COMMON_CLK_EYEQ
+ bool "Clock driver for the Mobileye EyeQ platform"
+ depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST
+ select AUXILIARY_BUS
+ default MACH_EYEQ5 || MACH_EYEQ6H
+ help
+	  This driver provides clocks found on Mobileye EyeQ5, EyeQ6L and EyeQ6H
+	  SoCs. Controllers live in shared register regions called OLB. The
+	  driver provides read-only PLLs, derived from the main crystal clock (which
+ must be constant). It also exposes some divider clocks.
+
config COMMON_CLK_FSL_FLEXSPI
tristate "Clock driver for FlexSPI on Layerscape SoCs"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -251,7 +278,7 @@ config COMMON_CLK_LAN966X
tristate "Generic Clock Controller driver for LAN966X SoC"
depends on HAS_IOMEM
depends on OF
- depends on SOC_LAN966 || COMPILE_TEST
+ depends on SOC_LAN966 || ARCH_LAN969X || COMPILE_TEST
help
This driver provides support for Generic Clock Controller(GCK) on
LAN966X SoC. GCK generates and supplies clock to various peripherals
@@ -283,7 +310,7 @@ config CLK_TWL
help
Enable support for controlling the clock resources on TWL family
PMICs. These devices have some 32K clock outputs which can be
- controlled by software. For now, only the TWL6032 clocks are
+ controlled by software. For now, the TWL6032 and TWL6030 clocks are
supported.
config CLK_TWL6040
@@ -334,6 +361,15 @@ config COMMON_CLK_LOCHNAGAR
This driver supports the clocking features of the Cirrus Logic
Lochnagar audio development board.
+config COMMON_CLK_NPCM8XX
+ tristate "Clock driver for the NPCM8XX SoC Family"
+ depends on ARCH_NPCM || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+	  This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC family.
+	  All the clocks are initialized by the bootloader, so this driver
+	  only allows reading the current settings directly from the hardware.
+
config COMMON_CLK_LOONGSON2
bool "Clock driver for Loongson-2 SoC"
depends on LOONGARCH || COMPILE_TEST
@@ -466,6 +502,15 @@ config COMMON_CLK_SP7021
Not all features of the PLL are currently supported
by the driver.
+config COMMON_CLK_RPMI
+ tristate "Clock driver based on RISC-V RPMI"
+ depends on RISCV || COMPILE_TEST
+ depends on MAILBOX
+ default RISCV
+ help
+ Support for clocks based on the clock service group defined by
+ the RISC-V platform management interface (RPMI) specification.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -476,6 +521,7 @@ source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
+source "drivers/clk/mmp/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mstar/Kconfig"
source "drivers/clk/microchip/Kconfig"
@@ -490,11 +536,13 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sophgo/Kconfig"
+source "drivers/clk/spacemit/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
+source "drivers/clk/thead/Kconfig"
source "drivers/clk/stm32/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
@@ -508,12 +556,22 @@ config CLK_KUNIT_TEST
tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select DTC
help
Kunit tests for the common clock framework.
+config CLK_FIXED_RATE_KUNIT_TEST
+ tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select DTC
+ help
+ KUnit tests for the basic fixed rate clk type.
+
config CLK_GATE_KUNIT_TEST
tristate "Basic gate type Kunit test" if !KUNIT_ALL_TESTS
depends on KUNIT
+ depends on !S390
default KUNIT_ALL_TESTS
help
Kunit test for the basic clk gate type.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 4abe16c8ccdf..61ec08404442 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -2,10 +2,29 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o
-obj-$(CONFIG_CLK_KUNIT_TEST) += clk_test.o
+obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o
+clk-test-y := clk_test.o \
+ kunit_clk_assigned_rates_u64_one.dtbo.o \
+ kunit_clk_assigned_rates_u64_one_consumer.dtbo.o \
+ kunit_clk_assigned_rates_u64_multiple.dtbo.o \
+ kunit_clk_assigned_rates_u64_multiple_consumer.dtbo.o \
+ kunit_clk_assigned_rates_multiple.dtbo.o \
+ kunit_clk_assigned_rates_multiple_consumer.dtbo.o \
+ kunit_clk_assigned_rates_null.dtbo.o \
+ kunit_clk_assigned_rates_null_consumer.dtbo.o \
+ kunit_clk_assigned_rates_one.dtbo.o \
+ kunit_clk_assigned_rates_one_consumer.dtbo.o \
+ kunit_clk_assigned_rates_without.dtbo.o \
+ kunit_clk_assigned_rates_without_consumer.dtbo.o \
+ kunit_clk_assigned_rates_zero.dtbo.o \
+ kunit_clk_assigned_rates_zero_consumer.dtbo.o \
+ kunit_clk_hw_get_dev_of_node.dtbo.o \
+ kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
+obj-$(CONFIG_CLK_FIXED_RATE_KUNIT_TEST) += clk-fixed-rate-test.o
+clk-fixed-rate-test-y := clk-fixed-rate_test.o kunit_clk_fixed_rate_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_CLK_GATE_KUNIT_TEST) += clk-gate_test.o
obj-$(CONFIG_COMMON_CLK) += clk-multiplier.o
@@ -18,6 +37,11 @@ ifeq ($(CONFIG_OF), y)
obj-$(CONFIG_COMMON_CLK) += clk-conf.o
endif
+# KUnit specific helpers
+ifeq ($(CONFIG_COMMON_CLK), y)
+obj-$(CONFIG_KUNIT) += clk_kunit_helpers.o
+endif
+
# hardware specific clock types
# please keep this section sorted lexicographically by file path name
obj-$(CONFIG_COMMON_CLK_APPLE_NCO) += clk-apple-nco.o
@@ -30,8 +54,10 @@ obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
+obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o
+obj-$(CONFIG_COMMON_CLK_EYEQ) += clk-eyeq.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
@@ -52,12 +78,15 @@ obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
+obj-$(CONFIG_COMMON_CLK_NPCM8XX) += clk-npcm8xx.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o
obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
+obj-$(CONFIG_COMMON_CLK_RP1) += clk-rp1.o
+obj-$(CONFIG_COMMON_CLK_RPMI) += clk-rpmi.o
obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
@@ -96,8 +125,7 @@ obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
obj-y += imx/
obj-y += ingenic/
-obj-$(CONFIG_ARCH_K3) += keystone/
-obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
+obj-y += keystone/
obj-y += mediatek/
obj-$(CONFIG_ARCH_MESON) += meson/
obj-y += microchip/
@@ -119,6 +147,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
obj-y += sophgo/
+obj-y += spacemit/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
@@ -127,6 +156,7 @@ obj-y += starfive/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-y += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_THEAD) += thead/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index c62024b7c737..b3dded204dc5 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -18,7 +18,6 @@ static const struct regmap_config owl_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x00cc,
- .fast_io = true,
};
static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
index 8fb65f3e82d7..5768a2e0f6a0 100644
--- a/drivers/clk/actions/owl-common.h
+++ b/drivers/clk/actions/owl-common.h
@@ -32,7 +32,7 @@ struct owl_clk_desc {
};
static inline struct owl_clk_common *
- hw_to_owl_clk_common(const struct clk_hw *hw)
+ hw_to_owl_clk_common(struct clk_hw *hw)
{
return container_of(hw, struct owl_clk_common, hw);
}
diff --git a/drivers/clk/actions/owl-composite.c b/drivers/clk/actions/owl-composite.c
index 48f177f6ce9c..00b74f8bc437 100644
--- a/drivers/clk/actions/owl-composite.c
+++ b/drivers/clk/actions/owl-composite.c
@@ -122,13 +122,13 @@ static int owl_comp_fact_set_rate(struct clk_hw *hw, unsigned long rate,
rate, parent_rate);
}
-static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_comp_fix_fact_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_composite *comp = hw_to_owl_comp(hw);
struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
- return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate);
+ return comp->fix_fact_ops->determine_rate(&fix_fact_hw->hw, req);
}
static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw,
@@ -193,7 +193,7 @@ const struct clk_ops owl_comp_fix_fact_ops = {
.is_enabled = owl_comp_is_enabled,
/* fix_fact_ops */
- .round_rate = owl_comp_fix_fact_round_rate,
+ .determine_rate = owl_comp_fix_fact_determine_rate,
.recalc_rate = owl_comp_fix_fact_recalc_rate,
.set_rate = owl_comp_fix_fact_set_rate,
};
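The hunks above are part of a tree-wide conversion of clk drivers from the .round_rate op to .determine_rate. A minimal sketch of the shape of each conversion, assuming a hypothetical foo clock that rounds down to 1 kHz steps (the foo_* names and the granularity are illustrative, not from this patch):

#include <linux/clk-provider.h>
#include <linux/math.h>

/* Before: return the rounded rate, report the parent rate via pointer. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	return rounddown(rate, 1000);
}

/* After: fill the clk_rate_request in place; return 0 or -errno. */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate = rounddown(req->rate, 1000);

	return 0;
}

The request struct also carries best_parent_rate, which replaces the old parent_rate output pointer, as the owl conversions above show.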
diff --git a/drivers/clk/actions/owl-composite.h b/drivers/clk/actions/owl-composite.h
index bca38bf8f218..6d7c6f0c47c8 100644
--- a/drivers/clk/actions/owl-composite.h
+++ b/drivers/clk/actions/owl-composite.h
@@ -108,7 +108,7 @@ struct owl_composite {
}, \
}
-static inline struct owl_composite *hw_to_owl_comp(const struct clk_hw *hw)
+static inline struct owl_composite *hw_to_owl_comp(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-divider.c b/drivers/clk/actions/owl-divider.c
index cddac00fe324..118f1393c678 100644
--- a/drivers/clk/actions/owl-divider.c
+++ b/drivers/clk/actions/owl-divider.c
@@ -23,13 +23,16 @@ long owl_divider_helper_round_rate(struct owl_clk_common *common,
div_hw->div_flags);
}
-static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_divider *div = hw_to_owl_divider(hw);
- return owl_divider_helper_round_rate(&div->common, &div->div_hw,
- rate, parent_rate);
+ req->rate = owl_divider_helper_round_rate(&div->common, &div->div_hw,
+ req->rate,
+ &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
@@ -89,6 +92,6 @@ static int owl_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops owl_divider_ops = {
.recalc_rate = owl_divider_recalc_rate,
- .round_rate = owl_divider_round_rate,
+ .determine_rate = owl_divider_determine_rate,
.set_rate = owl_divider_set_rate,
};
diff --git a/drivers/clk/actions/owl-divider.h b/drivers/clk/actions/owl-divider.h
index 083be6d80954..d76f58782c52 100644
--- a/drivers/clk/actions/owl-divider.h
+++ b/drivers/clk/actions/owl-divider.h
@@ -49,7 +49,7 @@ struct owl_divider {
}, \
}
-static inline struct owl_divider *hw_to_owl_divider(const struct clk_hw *hw)
+static inline struct owl_divider *hw_to_owl_divider(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
index 64f316cf7cfc..12f41f6bacd6 100644
--- a/drivers/clk/actions/owl-factor.c
+++ b/drivers/clk/actions/owl-factor.c
@@ -130,14 +130,16 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common,
return *parent_rate * mul / div;
}
-static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_factor *factor = hw_to_owl_factor(hw);
struct owl_factor_hw *factor_hw = &factor->factor_hw;
- return owl_factor_helper_round_rate(&factor->common, factor_hw,
- rate, parent_rate);
+ req->rate = owl_factor_helper_round_rate(&factor->common, factor_hw,
+ req->rate, &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
@@ -214,7 +216,7 @@ static int owl_factor_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops owl_factor_ops = {
- .round_rate = owl_factor_round_rate,
+ .determine_rate = owl_factor_determine_rate,
.recalc_rate = owl_factor_recalc_rate,
.set_rate = owl_factor_set_rate,
};
diff --git a/drivers/clk/actions/owl-factor.h b/drivers/clk/actions/owl-factor.h
index 04b89cbfdccb..24c704d40925 100644
--- a/drivers/clk/actions/owl-factor.h
+++ b/drivers/clk/actions/owl-factor.h
@@ -57,7 +57,7 @@ struct owl_factor {
#define div_mask(d) ((1 << ((d)->width)) - 1)
-static inline struct owl_factor *hw_to_owl_factor(const struct clk_hw *hw)
+static inline struct owl_factor *hw_to_owl_factor(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-gate.h b/drivers/clk/actions/owl-gate.h
index c2f161c93fda..ac458d4385ee 100644
--- a/drivers/clk/actions/owl-gate.h
+++ b/drivers/clk/actions/owl-gate.h
@@ -56,7 +56,7 @@ struct owl_gate {
}, \
} \
-static inline struct owl_gate *hw_to_owl_gate(const struct clk_hw *hw)
+static inline struct owl_gate *hw_to_owl_gate(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-mux.h b/drivers/clk/actions/owl-mux.h
index 53b9ab665294..dc0ecc2d5e10 100644
--- a/drivers/clk/actions/owl-mux.h
+++ b/drivers/clk/actions/owl-mux.h
@@ -44,7 +44,7 @@ struct owl_mux {
}, \
}
-static inline struct owl_mux *hw_to_owl_mux(const struct clk_hw *hw)
+static inline struct owl_mux *hw_to_owl_mux(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 155f313986b4..869690b79cc1 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -56,8 +56,8 @@ static const struct clk_pll_table *_get_pll_table(
return table;
}
-static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_pll *pll = hw_to_owl_pll(hw);
struct owl_pll_hw *pll_hw = &pll->pll_hw;
@@ -65,17 +65,24 @@ static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mul;
if (pll_hw->table) {
- clkt = _get_pll_table(pll_hw->table, rate);
- return clkt->rate;
+ clkt = _get_pll_table(pll_hw->table, req->rate);
+ req->rate = clkt->rate;
+
+ return 0;
}
/* fixed frequency */
- if (pll_hw->width == 0)
- return pll_hw->bfreq;
+ if (pll_hw->width == 0) {
+ req->rate = pll_hw->bfreq;
- mul = owl_pll_calculate_mul(pll_hw, rate);
+ return 0;
+ }
+
+ mul = owl_pll_calculate_mul(pll_hw, req->rate);
- return pll_hw->bfreq * mul;
+ req->rate = pll_hw->bfreq * mul;
+
+ return 0;
}
static unsigned long owl_pll_recalc_rate(struct clk_hw *hw,
@@ -188,7 +195,7 @@ const struct clk_ops owl_pll_ops = {
.enable = owl_pll_enable,
.disable = owl_pll_disable,
.is_enabled = owl_pll_is_enabled,
- .round_rate = owl_pll_round_rate,
+ .determine_rate = owl_pll_determine_rate,
.recalc_rate = owl_pll_recalc_rate,
.set_rate = owl_pll_set_rate,
};
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 78e5fc360b03..58e19f1ade43 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -98,7 +98,7 @@ struct owl_pll {
#define mul_mask(m) ((1 << ((m)->width)) - 1)
-static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
+static inline struct owl_pll *hw_to_owl_pll(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
index 65d422a588e1..9d178afc73bd 100644
--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
vco = vco_pre * f;
}
- delta = abs(target_rate - vco);
+ delta = abs(target_vco_rate - vco);
if (delta < best_delta) {
best_delta = delta;
best_r = r;
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 89061b85e7d2..9128a06b860d 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -20,7 +20,9 @@ obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o dt-compat.
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o dt-compat.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o dt-compat.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
+obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA7D65) += sama7d65.o
obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index a92da64c12e1..bf9b635ac9d6 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -270,8 +270,8 @@ static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
return 0;
}
-static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pad_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -283,7 +283,7 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
/*
* Rate divisor is actually made of two different divisors, multiplied
@@ -304,12 +304,12 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
continue;
best_parent_rate = clk_hw_round_rate(pclk,
- rate * tmp_qd * div);
+ req->rate * tmp_qd * div);
tmp_rate = best_parent_rate / (div * tmp_qd);
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
}
@@ -318,11 +318,13 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
__func__, best_rate, best_parent_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
-static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -333,20 +335,20 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
- if (!rate)
+ if (!req->rate)
return 0;
best_parent_rate = clk_round_rate(pclk->clk, 1);
- div = max(best_parent_rate / rate, 1UL);
+ div = max(best_parent_rate / req->rate, 1UL);
for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
- best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ best_parent_rate = clk_round_rate(pclk->clk, req->rate * div);
tmp_rate = best_parent_rate / div;
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
@@ -356,9 +358,11 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
}
pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
- __func__, best_rate, *parent_rate, tmp_qd - 1);
+ __func__, best_rate, req->best_parent_rate, tmp_qd - 1);
+
+ req->rate = best_rate;
- return best_rate;
+ return 0;
}
static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +440,7 @@ static const struct clk_ops audio_pll_pad_ops = {
.enable = clk_audio_pll_pad_enable,
.disable = clk_audio_pll_pad_disable,
.recalc_rate = clk_audio_pll_pad_recalc_rate,
- .round_rate = clk_audio_pll_pad_round_rate,
+ .determine_rate = clk_audio_pll_pad_determine_rate,
.set_rate = clk_audio_pll_pad_set_rate,
};
@@ -444,7 +448,7 @@ static const struct clk_ops audio_pll_pmc_ops = {
.enable = clk_audio_pll_pmc_enable,
.disable = clk_audio_pll_pmc_disable,
.recalc_rate = clk_audio_pll_pmc_recalc_rate,
- .round_rate = clk_audio_pll_pmc_round_rate,
+ .determine_rate = clk_audio_pll_pmc_determine_rate,
.set_rate = clk_audio_pll_pmc_set_rate,
};
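As the driver comment notes, the PAD clock divisor is the product of two hardware dividers, so clk_audio_pll_pad_determine_rate() above walks candidate pairs and keeps the rate closest to the request. A simplified standalone sketch, with assumed names and bounds (QD_MAX, pad_best_rate) and plain division in place of the driver's per-candidate clk_hw_round_rate() call:

#include <linux/limits.h>

#define QD_MAX	31	/* assumed bound, not the driver's constant */

static unsigned long pad_best_rate(unsigned long rate, unsigned long parent)
{
	unsigned long best = 0, best_diff = ULONG_MAX;
	unsigned int div, qd;

	for (div = 2; div <= 3; div++) {
		for (qd = 1; qd <= QD_MAX; qd++) {
			unsigned long cur = parent / (div * qd);
			unsigned long diff = (cur > rate) ? cur - rate
							  : rate - cur;

			if (diff < best_diff) {
				best_diff = diff;
				best = cur;
			}
		}
	}

	return best;
}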
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 1e6c12eeda10..a9aa93b5a870 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -40,21 +40,32 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_sama5d4_h32mx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sama5d4_h32mx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -77,7 +88,7 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops h32mx_ops = {
.recalc_rate = clk_sama5d4_h32mx_recalc_rate,
- .round_rate = clk_sama5d4_h32mx_round_rate,
+ .determine_rate = clk_sama5d4_h32mx_determine_rate,
.set_rate = clk_sama5d4_h32mx_set_rate,
};
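The h32mx clock can only output its parent rate or half of it, so the converted determine_rate above reduces to picking the nearer of the two candidates; the same rule reappears in clk-plldiv.c further down. A standalone sketch of the selection (pick_parent_or_half is an illustrative name):

static unsigned long pick_parent_or_half(unsigned long rate,
					 unsigned long parent)
{
	unsigned long half = parent / 2;

	if (rate > parent)
		return parent;
	if (rate < half)
		return half;

	/* Between the two candidates: take the nearer endpoint. */
	return (rate - half < parent - rate) ? half : parent;
}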
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 15c46489ba85..d5ea2069ec83 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -20,7 +20,7 @@
#define PMC_MCR_CSS_SHIFT (16)
-#define MASTER_MAX_ID 4
+#define MASTER_MAX_ID 9
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
@@ -580,6 +580,9 @@ clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
{
struct clk_master *master = to_clk_master(hw);
+ if (master->div == MASTER_PRES_MAX)
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, 3);
+
return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index c173a44c800a..e7208c47268b 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -279,8 +280,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
long best_diff = LONG_MIN;
u32 shift;
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = parent_rate;
+
+ return 0;
+ }
/* First step: check the available dividers. */
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
@@ -332,50 +336,57 @@ end:
return 0;
}
-static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sam9x5_peripheral_no_parent_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int shift = 0;
unsigned long best_rate;
unsigned long best_diff;
- unsigned long cur_rate = *parent_rate;
+ unsigned long cur_rate = req->best_parent_rate;
unsigned long cur_diff;
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return *parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (periph->range.max) {
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
+ cur_rate = req->best_parent_rate >> shift;
if (cur_rate <= periph->range.max)
break;
}
}
- if (rate >= cur_rate)
- return cur_rate;
+ if (req->rate >= cur_rate) {
+ req->rate = cur_rate;
+
+ return 0;
+ }
- best_diff = cur_rate - rate;
+ best_diff = cur_rate - req->rate;
best_rate = cur_rate;
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
- if (cur_rate < rate)
- cur_diff = rate - cur_rate;
+ cur_rate = req->best_parent_rate >> shift;
+ if (cur_rate < req->rate)
+ cur_diff = req->rate - cur_rate;
else
- cur_diff = cur_rate - rate;
+ cur_diff = cur_rate - req->rate;
if (cur_diff < best_diff) {
best_diff = cur_diff;
best_rate = cur_rate;
}
- if (!best_diff || cur_rate < rate)
+ if (!best_diff || cur_rate < req->rate)
break;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
@@ -427,7 +438,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.disable = clk_sam9x5_peripheral_disable,
.is_enabled = clk_sam9x5_peripheral_is_enabled,
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
- .round_rate = clk_sam9x5_peripheral_round_rate,
+ .determine_rate = clk_sam9x5_peripheral_no_parent_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
.save_context = clk_sam9x5_peripheral_save_context,
.restore_context = clk_sam9x5_peripheral_restore_context,
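The sam9x5 peripheral clock divides its parent by powers of two (1 << shift), so both rate paths above walk the shift values and track the closest candidate, stopping early on an exact match or once candidates fall below the request. A standalone sketch of that search, with an assumed PERIPH_MAX_SHIFT bound and the periph->range.max clamp omitted:

#include <linux/limits.h>

#define PERIPH_MAX_SHIFT	3	/* assumed, mirrors PERIPHERAL_MAX_SHIFT */

static unsigned long best_shifted_rate(unsigned long rate,
				       unsigned long parent)
{
	unsigned long best = parent, best_diff = ULONG_MAX;
	unsigned int shift;

	for (shift = 0; shift <= PERIPH_MAX_SHIFT; shift++) {
		unsigned long cur = parent >> shift;
		unsigned long diff = (cur > rate) ? cur - rate : rate - cur;

		if (diff < best_diff) {
			best_diff = diff;
			best = cur;
		}
		/* Exact hit, or further shifts only move away from rate. */
		if (!best_diff || cur < rate)
			break;
	}

	return best;
}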
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 249d6a53cedf..5c5f7398effe 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -231,13 +231,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
return bestrate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
- return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
- NULL, NULL, NULL);
+ req->rate = clk_pll_get_best_div_mul(pll, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -302,7 +304,7 @@ static const struct clk_ops pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.save_context = clk_pll_save_context,
.restore_context = clk_pll_restore_context,
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ba3a1839a96d..3ac09fecc54e 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -33,21 +33,33 @@ static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_plldiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +78,7 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops plldiv_ops = {
.recalc_rate = clk_plldiv_recalc_rate,
- .round_rate = clk_plldiv_round_rate,
+ .determine_rate = clk_plldiv_determine_rate,
.set_rate = clk_plldiv_set_rate,
};
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index ff65f7b916f0..3b965057ba0d 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,10 +23,7 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define FCORE_MIN (600000000)
-#define FCORE_MAX (1200000000)
-
-#define PLL_MAX_ID 7
+#define PLL_MAX_ID 9
struct sam9x60_pll_core {
struct regmap *regmap;
@@ -76,9 +73,15 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ unsigned long freq;
- return parent_rate * (frac->mul + 1) +
+ freq = parent_rate * (frac->mul + 1) +
DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22));
+
+ if (core->layout->div2)
+ freq >>= 1;
+
+ return freq;
}
static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
@@ -90,8 +93,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -100,11 +103,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
(cmul == frac->mul && cfrac == frac->frac))
goto unlock;
- /* Recommended value for PMC_PLL_ACR */
- if (core->characteristics->upll)
- val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
- else
- val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ /* Load recommended value for PMC_PLL_ACR */
+ val = core->characteristics->acr;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
regmap_write(regmap, AT91_PMC_PLL_CTRL1,
@@ -125,17 +125,17 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
udelay(10);
}
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -161,8 +161,8 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
@@ -170,9 +170,9 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -194,7 +194,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
unsigned long nmul = 0;
unsigned long nfrac = 0;
- if (rate < FCORE_MIN || rate > FCORE_MAX)
+ if (rate < core->characteristics->core_output[0].min ||
+ rate > core->characteristics->core_output[0].max)
return -ERANGE;
/*
@@ -214,7 +215,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
}
/* Check if the resulting rate is valid. */
- if (tmprate < FCORE_MIN || tmprate > FCORE_MAX)
+ if (tmprate < core->characteristics->core_output[0].min ||
+ tmprate > core->characteristics->core_output[0].max)
return -ERANGE;
if (update) {
@@ -225,12 +227,16 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
return tmprate;
}
-static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_frac_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
+ req->rate = sam9x60_frac_pll_compute_mul_frac(core, req->rate,
+ req->best_parent_rate,
+ false);
+
+ return 0;
}
static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -257,8 +263,8 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -270,18 +276,18 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
(frac->mul << core->layout->mul_shift) |
(frac->frac << core->layout->frac_shift));
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK |
AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -316,7 +322,7 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
@@ -327,13 +333,16 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
};
-/* This function should be called with spinlock acquired. */
+/* This function should be called with spinlock acquired.
+ * Warning: this function must be called only if the same PLL ID was set in
+ * the PLL_UPDT register previously.
+ */
static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
bool enable)
{
@@ -345,9 +354,9 @@ static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
core->layout->div_mask | ena_msk,
(div << core->layout->div_shift) | ena_val);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -361,8 +370,8 @@ static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
unsigned int val, cdiv;
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -393,15 +402,15 @@ static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
core->layout->endiv_mask, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -433,6 +442,12 @@ static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1));
}
+static unsigned long sam9x60_fixed_div_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate >> 1;
+}
+
static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
unsigned long *parent_rate,
unsigned long rate)
@@ -476,12 +491,15 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
return best_rate;
}
-static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_div_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_div_pll_compute_div(core, parent_rate, rate);
+ req->rate = sam9x60_div_pll_compute_div(core, &req->best_parent_rate,
+ req->rate);
+
+ return 0;
}
static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -507,8 +525,8 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -563,8 +581,8 @@ static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
div->div = div->safe_div;
spin_lock_irqsave(core.lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core.id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
@@ -590,7 +608,7 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -601,12 +619,22 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
};
+static const struct clk_ops sam9x60_fixed_div_pll_ops = {
+ .prepare = sam9x60_div_pll_prepare,
+ .unprepare = sam9x60_div_pll_unprepare,
+ .is_prepared = sam9x60_div_pll_is_prepared,
+ .recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
+};
+
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
@@ -669,7 +697,8 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
goto free;
}
- ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+ ret = sam9x60_frac_pll_compute_mul_frac(&frac->core,
+ characteristics->core_output[0].min,
parent_rate, true);
if (ret < 0) {
hw = ERR_PTR(ret);
@@ -725,10 +754,14 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
else
init.parent_names = &parent_name;
init.num_parents = 1;
- if (flags & CLK_SET_RATE_GATE)
+
+ if (layout->div2)
+ init.ops = &sam9x60_fixed_div_pll_ops;
+ else if (flags & CLK_SET_RATE_GATE)
init.ops = &sam9x60_div_pll_ops;
else
init.ops = &sam9x60_div_pll_ops_chg;
+
init.flags = flags;
div->core.id = id;
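Throughout clk-sam9x60-pll.c this series swaps regmap_update_bits() for regmap_write_bits() on the PLL_UPDT register. Both are read-modify-write helpers, but regmap_update_bits() skips the bus write when the masked bits already hold the requested value, whereas regmap_write_bits() always issues the write; here the write itself is what selects the PLL ID and latches the UPDATE transfer, so it must not be elided. A side-by-side sketch (select_pll_id is an illustrative wrapper; the AT91_PMC_PLL_* defines come from the driver's PMC headers):

#include <linux/regmap.h>

static void select_pll_id(struct regmap *regmap, unsigned int id)
{
	/* May silently skip the bus write if the ID bits already match: */
	regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
			   AT91_PMC_PLL_UPDT_ID_MSK, id);

	/* Always performs the write, so the ID selection is applied: */
	regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
			  AT91_PMC_PLL_UPDT_ID_MSK, id);
}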
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0696a928aa9..e906928cfbf0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -319,8 +319,8 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int at91rm9200_clk_usb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -336,25 +336,27 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
if (!usb->divisors[i])
continue;
- tmp_parent_rate = rate * usb->divisors[i];
+ tmp_parent_rate = req->rate * usb->divisors[i];
tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
- if (tmprate < rate)
- tmpdiff = rate - tmprate;
+ if (tmprate < req->rate)
+ tmpdiff = req->rate - tmprate;
else
- tmpdiff = tmprate - rate;
+ tmpdiff = tmprate - req->rate;
if (bestdiff < 0 || bestdiff > tmpdiff) {
bestrate = tmprate;
bestdiff = tmpdiff;
- *parent_rate = tmp_parent_rate;
+ req->best_parent_rate = tmp_parent_rate;
}
if (!bestdiff)
break;
}
- return bestrate;
+ req->rate = bestrate;
+
+ return 0;
}
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -384,7 +386,7 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops at91rm9200_usb_ops = {
.recalc_rate = at91rm9200_clk_usb_recalc_rate,
- .round_rate = at91rm9200_clk_usb_round_rate,
+ .determine_rate = at91rm9200_clk_usb_determine_rate,
.set_rate = at91rm9200_clk_usb_set_rate,
};
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a32dc2111b90..f5a5f9ba7634 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -563,9 +563,10 @@ of_at91_clk_pll_get_characteristics(struct device_node *np)
if (num_cells < 2 || num_cells > 4)
return NULL;
- if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ num_output = of_property_count_u32_elems(np, "atmel,pll-clk-output-ranges");
+ if (num_output <= 0)
return NULL;
- num_output = tmp / (sizeof(u32) * num_cells);
+ num_output /= num_cells;
characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
if (!characteristics)
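The dt-compat.c hunk above replaces of_get_property() byte-length arithmetic with of_property_count_u32_elems(), which returns the number of u32 cells or a negative errno. A minimal sketch of the new pattern (count_output_ranges and the modulo sanity check are illustrative additions, not part of the patch):

#include <linux/of.h>

static int count_output_ranges(struct device_node *np, int num_cells)
{
	int num = of_property_count_u32_elems(np,
					      "atmel,pll-clk-output-ranges");

	if (num <= 0 || num % num_cells)
		return -EINVAL;

	return num / num_cells;
}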
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 5aa9c1f1c886..2310f6f73162 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -115,7 +115,7 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
/* Address in SECURAM that say if we suspend to backup mode. */
static void __iomem *at91_pmc_backup_suspend;
-static int at91_pmc_suspend(void)
+static int at91_pmc_suspend(void *data)
{
unsigned int backup;
@@ -129,7 +129,7 @@ static int at91_pmc_suspend(void)
return clk_save_context();
}
-static void at91_pmc_resume(void)
+static void at91_pmc_resume(void *data)
{
unsigned int backup;
@@ -143,14 +143,19 @@ static void at91_pmc_resume(void)
clk_restore_context();
}
-static struct syscore_ops pmc_syscore_ops = {
+static const struct syscore_ops pmc_syscore_ops = {
.suspend = at91_pmc_suspend,
.resume = at91_pmc_resume,
};
+static struct syscore pmc_syscore = {
+ .ops = &pmc_syscore_ops,
+};
+
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
+ { .compatible = "microchip,sama7d65-pmc", },
{ /* sentinel */ }
};
@@ -184,7 +189,7 @@ static int __init pmc_register_ops(void)
return -ENOMEM;
}
- register_syscore_ops(&pmc_syscore_ops);
+ register_syscore(&pmc_syscore);
return 0;
}
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 0f52e80bcd49..543d7aee8d24 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -64,6 +64,7 @@ struct clk_pll_layout {
u8 frac_shift;
u8 div_shift;
u8 endiv_shift;
+ u8 div2;
};
extern const struct clk_pll_layout at91rm9200_pll_layout;
@@ -75,9 +76,11 @@ struct clk_pll_characteristics {
struct clk_range input;
int num_output;
const struct clk_range *output;
+ const struct clk_range *core_output;
u16 *icpll;
u8 *out;
u8 upll : 1;
+ u32 acr;
};
struct clk_programmable_layout {
@@ -114,11 +117,24 @@ struct at91_clk_pms {
unsigned int parent;
};
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
+
+#define PMC_INIT_TABLE(_table, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) \
+ (_table)[_i] = _i; \
+ } while (0)
+
+#define PMC_FILL_TABLE(_to, _from, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) { \
+ (_to)[_i] = (_from)[_i]; \
+ } \
+ } while (0)
+
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
unsigned int nperiph, unsigned int ngck,
unsigned int npck);
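The PMC_INIT_TABLE() and PMC_FILL_TABLE() helpers added above are small loop macros for building mux tables. A hypothetical use, seeding one table with the identity mapping and filling another from a driver-provided array (names and types are illustrative):

static void build_mux_tables(u32 *ident, u32 *custom, const u8 *src, u8 count)
{
	PMC_INIT_TABLE(ident, count);		/* ident[i] = i */
	PMC_FILL_TABLE(custom, src, count);	/* custom[i] = src[i] */
}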
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index e309cbf3cb9a..18baf4a256f4 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -26,10 +26,17 @@ static const struct clk_range plla_outputs[] = {
{ .min = 2343750, .max = 1200000000 },
};
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
static const struct clk_pll_characteristics plla_characteristics = {
.input = { .min = 12000000, .max = 48000000 },
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00020010),
};
static const struct clk_range upll_outputs[] = {
@@ -40,7 +47,9 @@ static const struct clk_pll_characteristics upll_characteristics = {
.input = { .min = 12000000, .max = 48000000 },
.num_output = ARRAY_SIZE(upll_outputs),
.output = upll_outputs,
+ .core_output = core_outputs,
.upll = true,
+ .acr = UL(0x12023010), /* fIN = [18 MHz, 32 MHz] */
};
static const struct clk_pll_layout pll_frac_layout = {
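The core_output range and .acr value added above feed the sam9x60 fractional-PLL code, where the core rate is parent * (mul + 1) + parent * frac / 2^22, halved when the new div2 layout flag is set. A worked example with assumed register values (24 MHz input, mul = 49, frac = 2^21; not values from the patch):

#include <linux/math.h>

static unsigned long frac_pll_example(void)
{
	unsigned long parent = 24000000;	/* assumed 24 MHz input */
	unsigned long mul = 49;
	unsigned long frac = 1 << 21;		/* frac / 2^22 = 0.5 */
	unsigned long freq;

	/* 24 MHz * 50 + 12 MHz = 1212 MHz core clock */
	freq = parent * (mul + 1) +
	       DIV_ROUND_CLOSEST_ULL((u64)parent * frac, 1 << 22);

	/* With the div2 flag (as in the sam9x7 PLLA layout): 606 MHz */
	return freq >> 1;
}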
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
new file mode 100644
index 000000000000..89868a0aeaba
--- /dev/null
+++ b/drivers/clk/at91/sam9x7.c
@@ -0,0 +1,952 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAM9X7 PMC code.
+ *
+ * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Varshini Rajendran <varshini.rajendran@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(mck_lock);
+
+/**
+ * enum pll_ids - PLL clocks identifiers
+ * @PLL_ID_PLLA: PLLA identifier
+ * @PLL_ID_UPLL: UPLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_PLLA_DIV2: PLLA DIV2 identifier
+ * @PLL_ID_MAX: Max PLL Identifier
+ */
+enum pll_ids {
+ PLL_ID_PLLA,
+ PLL_ID_UPLL,
+ PLL_ID_AUDIO,
+ PLL_ID_LVDS,
+ PLL_ID_PLLA_DIV2,
+ PLL_ID_MAX,
+};
+
+/**
+ * enum pll_type - PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV,
+};
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 32000000, .max = 266666667 },
+ .divisors = { 1, 2, 4, 3, 5},
+ .have_div3_pres = 1,
+};
+
+static const struct clk_master_layout sam9x7_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range plla_core_outputs[] = {
+ { .min = 800000000, .max = 1600000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 960000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range audiopll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range plladiv2_core_outputs[] = {
+ { .min = 800000000, .max = 1600000000 },
+};
+
+/* Fractional PLL output range. */
+static const struct clk_range plla_outputs[] = {
+ { .min = 400000000, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 300000000, .max = 480000000 },
+};
+
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 175000000, .max = 550000000 },
+};
+
+static const struct clk_range audiopll_outputs[] = {
+ { .min = 0, .max = 300000000 },
+};
+
+static const struct clk_range plladiv2_outputs[] = {
+ { .min = 200000000, .max = 400000000 },
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .core_output = plla_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .upll = true,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
+};
+
+static const struct clk_pll_characteristics audiopll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(audiopll_outputs),
+ .output = audiopll_outputs,
+ .core_output = audiopll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
+};
+
+static const struct clk_pll_characteristics plladiv2_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plladiv2_outputs),
+ .output = plladiv2_outputs,
+ .core_output = plladiv2_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
+};
+
+/* Layout for fractional PLL ID PLLA. */
+static const struct clk_pll_layout plla_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+ .div2 = 1,
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIV PLLs. */
+static const struct clk_pll_layout pll_divpmc_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIV PLL ID PLLADIV2. */
+static const struct clk_pll_layout plladiv2_divpmc_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+ .div2 = 1,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_divio_layout = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @p: clock parent
+ * @l: clock layout
+ * @t: clock type
+ * @c: pll characteristics
+ * @f: clock flags
+ * @eid: export index in sam9x7->chws[] array
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ const struct clk_pll_layout *l;
+ u8 t;
+ const struct clk_pll_characteristics *c;
+ unsigned long f;
+ u8 eid;
+} sam9x7_plls[][3] = {
+ [PLL_ID_PLLA] = {
+ {
+ .n = "plla_fracck",
+ .p = "mainck",
+ .l = &plla_frac_layout,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds plla_divpmcck, which feeds the CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .c = &plla_characteristics,
+ },
+
+ {
+ .n = "plla_divpmcck",
+ .p = "plla_fracck",
+ .l = &pll_divpmc_layout,
+ .t = PLL_TYPE_DIV,
+ /* This feeds the CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_PLLACK,
+ .c = &plla_characteristics,
+ },
+ },
+
+ [PLL_ID_UPLL] = {
+ {
+ .n = "upll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ .c = &upll_characteristics,
+ },
+
+ {
+ .n = "upll_divpmcck",
+ .p = "upll_fracck",
+ .l = &pll_divpmc_layout,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ .c = &upll_characteristics,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ {
+ .n = "audiopll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .f = CLK_SET_RATE_GATE,
+ .c = &audiopll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ },
+
+ {
+ .n = "audiopll_divpmcck",
+ .p = "audiopll_fracck",
+ .l = &pll_divpmc_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &audiopll_characteristics,
+ .eid = PMC_AUDIOPMCPLL,
+ .t = PLL_TYPE_DIV,
+ },
+
+ {
+ .n = "audiopll_diviock",
+ .p = "audiopll_fracck",
+ .l = &pll_divio_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &audiopll_characteristics,
+ .eid = PMC_AUDIOIOPLL,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ {
+ .n = "lvdspll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .f = CLK_SET_RATE_GATE,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ },
+
+ {
+ .n = "lvdspll_divpmcck",
+ .p = "lvdspll_fracck",
+ .l = &pll_divpmc_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &lvdspll_characteristics,
+ .eid = PMC_LVDSPLL,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+
+ [PLL_ID_PLLA_DIV2] = {
+ {
+ .n = "plla_div2pmcck",
+ .p = "plla_fracck",
+ .l = &plladiv2_divpmc_layout,
+ /*
+ * This may feed critical parts of the system like timers.
+ * It should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .c = &plladiv2_characteristics,
+ .eid = PMC_PLLADIV2,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+};
+
+static const struct clk_programmable_layout sam9x7_programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+static const struct clk_pcr_layout sam9x7_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+ unsigned long flags;
+} sam9x7_systemck[] = {
+ /*
+ * ddrck feeds the DDR controller and is enabled by the bootloader, so
+ * we need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+/*
+ * Peripheral clocks description
+ * @n: clock name
+ * @f: clock flags
+ * @id: peripheral id
+ */
+static const struct {
+ char *n;
+ unsigned long f;
+ u8 id;
+} sam9x7_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "flex0_clk", .id = 5, },
+ { .n = "flex1_clk", .id = 6, },
+ { .n = "flex2_clk", .id = 7, },
+ { .n = "flex3_clk", .id = 8, },
+ { .n = "flex6_clk", .id = 9, },
+ { .n = "flex7_clk", .id = 10, },
+ { .n = "flex8_clk", .id = 11, },
+ { .n = "sdmmc0_clk", .id = 12, },
+ { .n = "flex4_clk", .id = 13, },
+ { .n = "flex5_clk", .id = 14, },
+ { .n = "flex9_clk", .id = 15, },
+ { .n = "flex10_clk", .id = 16, },
+ { .n = "tcb0_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcd_clk", .id = 25, },
+ { .n = "sdmmc1_clk", .id = 26, },
+ { .n = "ssc_clk", .id = 28, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { .n = "flex11_clk", .id = 32, },
+ { .n = "flex12_clk", .id = 33, },
+ { .n = "i2s_clk", .id = 34, },
+ { .n = "qspi_clk", .id = 35, },
+ { .n = "gfx2d_clk", .id = 36, },
+ { .n = "pit64b0_clk", .id = 37, },
+ { .n = "trng_clk", .id = 38, },
+ { .n = "aes_clk", .id = 39, },
+ { .n = "tdes_clk", .id = 40, },
+ { .n = "sha_clk", .id = 41, },
+ { .n = "classd_clk", .id = 42, },
+ { .n = "isi_clk", .id = 43, },
+ { .n = "pioD_clk", .id = 44, },
+ { .n = "tcb1_clk", .id = 45, },
+ { .n = "dbgu_clk", .id = 47, },
+ { .n = "pmecc_clk", .id = 48, },
+ /*
+ * mpddr_clk feeds the DDR controller and is enabled by the bootloader,
+ * so we need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 49, .f = CLK_IS_CRITICAL },
+ { .n = "csi2dc_clk", .id = 52, },
+ { .n = "csi4l_clk", .id = 53, },
+ { .n = "dsi4l_clk", .id = 54, },
+ { .n = "lvdsc_clk", .id = 56, },
+ { .n = "pit64b1_clk", .id = 58, },
+ { .n = "puf_clk", .id = 59, },
+ { .n = "gmactsu_clk", .id = 67, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sam9x7_gck[] = {
+ {
+ .n = "flex0_gclk",
+ .id = 5,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex1_gclk",
+ .id = 6,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex2_gclk",
+ .id = 7,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex3_gclk",
+ .id = 8,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex6_gclk",
+ .id = 9,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex7_gclk",
+ .id = 10,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex8_gclk",
+ .id = 11,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "sdmmc0_gclk",
+ .id = 12,
+ .r = { .max = 105000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex4_gclk",
+ .id = 13,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex5_gclk",
+ .id = 14,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex9_gclk",
+ .id = 15,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex10_gclk",
+ .id = 16,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "tcb0_gclk",
+ .id = 17,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "adc_gclk",
+ .id = 19,
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "lcd_gclk",
+ .id = 25,
+ .r = { .max = 75000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "sdmmc1_gclk",
+ .id = 26,
+ .r = { .max = 105000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mcan0_gclk",
+ .id = 29,
+ .r = { .max = 80000000 },
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mcan1_gclk",
+ .id = 30,
+ .r = { .max = 80000000 },
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex11_gclk",
+ .id = 32,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex12_gclk",
+ .id = 33,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "i2s_gclk",
+ .id = 34,
+ .r = { .max = 100000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "qspi_gclk",
+ .id = 35,
+ .r = { .max = 200000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "pit64b0_gclk",
+ .id = 37,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "classd_gclk",
+ .id = 42,
+ .r = { .max = 100000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "tcb1_gclk",
+ .id = 45,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "dbgu_gclk",
+ .id = 47,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mipiphy_gclk",
+ .id = 55,
+ .r = { .max = 27000000 },
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "pit64b1_gclk",
+ .id = 58,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "gmac_gclk",
+ .id = 67,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+};
+
+static void __init sam9x7_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ struct pmc_data *sam9x7_pmc;
+ const char *parent_names[9];
+ void **clk_mux_buffer = NULL;
+ int clk_mux_buffer_size = 0;
+ struct clk_hw *main_osc_hw;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i, j;
+
+ i = of_property_match_string(np, "clock-names", "td_slck");
+ if (i < 0)
+ return;
+
+ td_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "md_slck");
+ if (i < 0)
+ return;
+
+ md_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sam9x7_pmc = pmc_data_allocate(PMC_LVDSPLL + 1,
+ nck(sam9x7_systemck),
+ nck(sam9x7_periphck),
+ nck(sam9x7_gck), 8);
+ if (!sam9x7_pmc)
+ return;
+
+ clk_mux_buffer = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sam9x7_gck)),
+ GFP_KERNEL);
+ if (!clk_mux_buffer)
+ goto err_free;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+ main_osc_hw = hw;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < 3; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sam9x7_plls[i][j].n)
+ continue;
+
+ switch (sam9x7_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ if (!strcmp(sam9x7_plls[i][j].p, "mainck"))
+ parent_hw = sam9x7_pmc->chws[PMC_MAIN];
+ else if (!strcmp(sam9x7_plls[i][j].p, "main_osc"))
+ parent_hw = main_osc_hw;
+ else
+ parent_hw = __clk_get_hw(of_clk_get_by_name
+ (np, sam9x7_plls[i][j].p));
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock,
+ sam9x7_plls[i][j].n,
+ sam9x7_plls[i][j].p,
+ parent_hw, i,
+ sam9x7_plls[i][j].c,
+ sam9x7_plls[i][j].l,
+ sam9x7_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock,
+ sam9x7_plls[i][j].n,
+ sam9x7_plls[i][j].p, NULL, i,
+ sam9x7_plls[i][j].c,
+ sam9x7_plls[i][j].l,
+ sam9x7_plls[i][j].f, 0);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ if (sam9x7_plls[i][j].eid)
+ sam9x7_pmc->chws[sam9x7_plls[i][j].eid] = hw;
+ }
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plla_divpmcck";
+ parent_names[3] = "upll_divpmcck";
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names, NULL, &sam9x7_master_layout,
+ &mck_characteristics, &mck_lock);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres", NULL, &sam9x7_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plla_divpmcck";
+ parent_names[1] = "upll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck_div";
+ parent_names[4] = "plla_divpmcck";
+ parent_names[5] = "upll_divpmcck";
+ parent_names[6] = "audiopll_divpmcck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, NULL, 7, i,
+ &sam9x7_programmable_layout,
+ NULL);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x7_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sam9x7_systemck[i].n,
+ sam9x7_systemck[i].p, NULL,
+ sam9x7_systemck[i].id,
+ sam9x7_systemck[i].flags);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->shws[sam9x7_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x7_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sam9x7_pcr_layout,
+ sam9x7_periphck[i].n,
+ "masterck_div", NULL,
+ sam9x7_periphck[i].id,
+ &range, INT_MIN,
+ sam9x7_periphck[i].f);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->phws[sam9x7_periphck[i].id] = hw;
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck_div";
+ for (i = 0; i < ARRAY_SIZE(sam9x7_gck); i++) {
+ u8 num_parents = 4 + sam9x7_gck[i].pp_count;
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sam9x7_gck[i].pp_mux_table,
+ sam9x7_gck[i].pp_count);
+ PMC_FILL_TABLE(&parent_names[4], sam9x7_gck[i].pp,
+ sam9x7_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sam9x7_pcr_layout,
+ sam9x7_gck[i].n,
+ parent_names, NULL, mux_table,
+ num_parents,
+ sam9x7_gck[i].id,
+ &sam9x7_gck[i].r,
+ sam9x7_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->ghws[sam9x7_gck[i].id] = hw;
+ clk_mux_buffer[clk_mux_buffer_size++] = mux_table;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sam9x7_pmc);
+ kfree(clk_mux_buffer);
+
+ return;
+
+err_free:
+ if (clk_mux_buffer) {
+ for (i = 0; i < clk_mux_buffer_size; i++)
+ kfree(clk_mux_buffer[i]);
+ kfree(clk_mux_buffer);
+ }
+ kfree(sam9x7_pmc);
+}
+
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sam9x7_pmc, "microchip,sam9x7-pmc", sam9x7_pmc_setup);
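In the generated-clock loop above, each mux table gets identity entries for the four common parents (md_slck, td_slck, mainck, masterck_div) followed by the PMC mux positions of the per-clock PLL parents. A standalone sketch of that assembly, assuming the PMC_INIT_TABLE()/PMC_FILL_TABLE() helpers expand to the same simple loops as the SAMA7G5_* macros they replace further down in this series:

#include <stdio.h>

/* Assumed expansions; the pmc.h definitions are not shown in this patch. */
#define PMC_INIT_TABLE(_table, _count)			\
	do {						\
		unsigned char _i;			\
		for (_i = 0; _i < (_count); _i++)	\
			(_table)[_i] = _i;		\
	} while (0)

#define PMC_FILL_TABLE(_to, _from, _count)		\
	do {						\
		unsigned char _i;			\
		for (_i = 0; _i < (_count); _i++)	\
			(_to)[_i] = (_from)[_i];	\
	} while (0)

int main(void)
{
	/* sdmmc0_gclk: four common parents, then audiopll_divpmcck (6)
	 * and plla_div2pmcck (8) from its pp_mux_table. */
	unsigned int mux_table[6];
	const unsigned char pp_mux_table[] = { 6, 8 };
	int i;

	PMC_INIT_TABLE(mux_table, 4);			/* 0 1 2 3 */
	PMC_FILL_TABLE(&mux_table[4], pp_mux_table, 2);	/* 6 8 */

	for (i = 0; i < 6; i++)
		printf("parent %d -> GCKCSS %u\n", i, mux_table[i]);
	return 0;
}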
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
new file mode 100644
index 000000000000..7dee2b160ffb
--- /dev/null
+++ b/drivers/clk/at91/sama7d65.c
@@ -0,0 +1,1379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7D65 PMC code.
+ *
+ * Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Ryan Wanner <ryan.wanner@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+#define PMC_INDEX_MAX 25
+
+/*
+ * PLL clocks identifiers
+ * @PLL_ID_CPU: CPU PLL identifier
+ * @PLL_ID_SYS: System PLL identifier
+ * @PLL_ID_DDR: DDR PLL identifier
+ * @PLL_ID_GPU: Graphics subsystem PLL identifier
+ * @PLL_ID_BAUD: Baud PLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_ETH: Ethernet PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_USB: USB PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_CPU,
+ PLL_ID_SYS,
+ PLL_ID_DDR,
+ PLL_ID_GPU,
+ PLL_ID_BAUD,
+ PLL_ID_AUDIO,
+ PLL_ID_ETH,
+ PLL_ID_LVDS,
+ PLL_ID_USB,
+ PLL_ID_MAX
+};
+
+/*
+ * PLL component identifier
+ * @PLL_COMPID_FRAC: Fractional PLL component identifier
+ * @PLL_COMPID_DIV0: 1st PLL divider component identifier
+ * @PLL_COMPID_DIV1: 2nd PLL divider component identifier
+ */
+enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
+ PLL_COMPID_MAX
+};
+
+/*
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * CPU PLL output range.
+ * Note: the upper limit has been set to 1000000002 because the hardware
+ * block cannot output exactly 1 GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
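The extra 2 Hz of headroom follows from the fractional step size. Assuming the sam9x60-style formula fout = fref * (MUL + 1) + fref * FRAC / 2^22 (FRAC being the 22-bit field in pll_layout_frac above) and a hypothetical 24 MHz reference, the granularity near 1 GHz is about 5.7 Hz, so exactly 1.0 GHz is unreachable; the sketch below checks that the closest reachable rate lands at 1000000001 Hz, inside the range only thanks to the relaxed limit:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fref = 24000000;	/* hypothetical main crystal */
	uint64_t mul = 40;		/* MUL + 1 = 41 -> 984 MHz base */
	uint64_t base = fref * (mul + 1);
	/* FRAC closest to the missing 16 MHz, at fref / 2^22 per step. */
	uint64_t frac = ((1000000000ull - base) * (1ull << 22) + fref / 2) / fref;
	uint64_t rate = base + ((fref * frac) >> 22);

	printf("closest rate to 1 GHz: %llu Hz\n", (unsigned long long)rate);
	return 0;			/* prints 1000000001 */
}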
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/*
+ * Min: fCOREPLLCK = 600 MHz, PMC_PLL_CTRL0.DIVPMC = 255
+ * Max: fCOREPLLCK = 800 MHz, PMC_PLL_CTRL0.DIVPMC = 0
+ */
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 16406250, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 480000000, .max = 480000000 },
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00070010),
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00070010),
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+ .acr = UL(0x00070010),
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .acr = UL(0x12020010),
+ .upll = true,
+};
+
+/*
+ * SAMA7D65 PLL possible parents
+ * @SAMA7D65_PLL_PARENT_MAINCK: MAINCK is PLL a parent
+ * @SAMA7D65_PLL_PARENT_MAIN_XTAL: MAIN XTAL is a PLL parent
+ * @SAMA7D65_PLL_PARENT_FRACCK: Frac PLL is a PLL parent (for PLL dividers)
+ */
+enum sama7d65_pll_parent {
+ SAMA7D65_PLL_PARENT_MAINCK,
+ SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ SAMA7D65_PLL_PARENT_FRACCK
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @l: clock layout
+ * @c: clock characteristics
+ * @hw: pointer to clk_hw
+ * @t: clock type
+ * @f: clock flags
+ * @p: clock parent
+ * @eid: export index in sama7d65->chws[] array
+ * @safe_div: intermediate divider that needs to be set on PRE_RATE_CHANGE
+ * notification
+ */
+static struct sama7d65_pll {
+ const char *n;
+ const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ struct clk_hw *hw;
+ unsigned long f;
+ enum sama7d65_pll_parent p;
+ u8 t;
+ u8 eid;
+ u8 safe_div;
+} sama7d65_plls[][PLL_COMPID_MAX] = {
+ [PLL_ID_CPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds cpupll_divpmcck which feeds CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "cpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL,
+ /*
+ * A safe_div of 15 should be safe even when switching between 1 GHz
+ * and 90 MHz (the frac PLL might go up to 1.2 GHz).
+ */
+ .safe_div = 15,
+ },
+ },
+
+ [PLL_ID_SYS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "syspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds syspll_divpmcck which may feed critical parts
+ * of the system, such as timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "syspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /*
+ * This may feed critical parts of the system, such as timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL,
+ },
+ },
+
+ [PLL_ID_DDR] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ddrpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds ddrpll_divpmcck which feeds DDR. It should not
+ * be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ddrpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+ },
+
+ [PLL_ID_GPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "gpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "gpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
+ },
+
+ [PLL_ID_BAUD] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "baudpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "baudpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_BAUDPLL,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "audiopll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "audiopll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL,
+ },
+
+ [PLL_COMPID_DIV1] = {
+ .n = "audiopll_diviock",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divio,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL,
+ },
+ },
+
+ [PLL_ID_ETH] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ethpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ethpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_ETHPLL,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "lvdspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "lvdspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_LVDSPLL,
+ },
+ },
+
+ [PLL_ID_USB] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "usbpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "usbpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ },
+ },
+};
+
+/* Used to create an array entry identifying a PLL by its components. */
+#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp}
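The macro only pastes the two enum names into an index pair; the setup loops later dereference each pair back to a clk_hw through sama7d65_plls[pll_id][pll_compid]. A minimal expansion check, with trimmed stand-in enums:

#include <stdio.h>

enum pll_ids { PLL_ID_CPU, PLL_ID_SYS, PLL_ID_DDR };		/* trimmed */
enum pll_component_id { PLL_COMPID_FRAC, PLL_COMPID_DIV0 };	/* trimmed */

#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp }

struct ep_entry { int pll_id; int pll_compid; };

int main(void)
{
	/* mck2's extra parents: syspll_divpmcck and ddrpll_divpmcck. */
	const struct ep_entry ep[] = {
		PLL_IDS_TO_ARR_ENTRY(SYS, DIV0),
		PLL_IDS_TO_ARR_ENTRY(DDR, DIV0),
	};
	unsigned int i;

	for (i = 0; i < sizeof(ep) / sizeof(ep[0]); i++)
		printf("ep[%u] = sama7d65_plls[%d][%d]\n", i,
		       ep[i].pll_id, ep[i].pll_compid);
	return 0;
}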
+
+/*
+ * Master clock (MCK[0..9]) description
+ * @n: clock name
+ * @ep: extra parents names array (entry formed by PLL components
+ * identifiers (see enum pll_component_id))
+ * @hw: pointer to clk_hw
+ * @ep_chg_id: index in parents array that specifies the changeable
+ * parent
+ * @ep_count: extra parents count
+ * @ep_mux_table: mux table for extra parents
+ * @id: clock id
+ * @eid: export index in sama7d65->chws[] array
+ * @c: true if clock is critical and cannot be disabled
+ */
+static struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } ep[4];
+ struct clk_hw *hw;
+ int ep_chg_id;
+ u8 ep_count;
+ u8 ep_mux_table[4];
+ u8 id;
+ u8 eid;
+ u8 c;
+} sama7d65_mckx[] = {
+ { .n = "mck0", }, /* Dummy entry for MCK0 to store hw in probe. */
+ { .n = "mck1",
+ .id = 1,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK1,
+ .c = 1, },
+
+ { .n = "mck2",
+ .id = 2,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck3",
+ .id = 3,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK3,
+ .c = 1, },
+
+ { .n = "mck4",
+ .id = 4,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck5",
+ .id = 5,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK5,
+ .c = 1, },
+
+ { .n = "mck6",
+ .id = 6,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1,
+ .c = 1, },
+
+ { .n = "mck7",
+ .id = 7,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck8",
+ .id = 8,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck9",
+ .id = 9,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+};
+
+/*
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ u8 id;
+} sama7d65_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8, },
+ { .n = "pck1", .p = "prog1", .id = 9, },
+ { .n = "pck2", .p = "prog2", .id = 10, },
+ { .n = "pck3", .p = "prog3", .id = 11, },
+ { .n = "pck4", .p = "prog4", .id = 12, },
+ { .n = "pck5", .p = "prog5", .id = 13, },
+ { .n = "pck6", .p = "prog6", .id = 14, },
+ { .n = "pck7", .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sama7d65_prog_mux_table[] = { 0, 1, 2, 5, 7, 8, 9, 10, 12 };
+
+/*
+ * Peripheral clock parent hw identifier (used to index in sama7d65_mckx[])
+ * @PCK_PARENT_HW_MCK0: pck parent hw identifier is MCK0
+ * @PCK_PARENT_HW_MCK1: pck parent hw identifier is MCK1
+ * @PCK_PARENT_HW_MCK2: pck parent hw identifier is MCK2
+ * @PCK_PARENT_HW_MCK3: pck parent hw identifier is MCK3
+ * @PCK_PARENT_HW_MCK4: pck parent hw identifier is MCK4
+ * @PCK_PARENT_HW_MCK5: pck parent hw identifier is MCK5
+ * @PCK_PARENT_HW_MCK6: pck parent hw identifier is MCK6
+ * @PCK_PARENT_HW_MCK7: pck parent hw identifier is MCK7
+ * @PCK_PARENT_HW_MCK8: pck parent hw identifier is MCK8
+ * @PCK_PARENT_HW_MCK9: pck parent hw identifier is MCK9
+ * @PCK_PARENT_HW_MAX: max identifier
+ */
+enum sama7d65_pck_parent_hw_id {
+ PCK_PARENT_HW_MCK0,
+ PCK_PARENT_HW_MCK1,
+ PCK_PARENT_HW_MCK2,
+ PCK_PARENT_HW_MCK3,
+ PCK_PARENT_HW_MCK4,
+ PCK_PARENT_HW_MCK5,
+ PCK_PARENT_HW_MCK6,
+ PCK_PARENT_HW_MCK7,
+ PCK_PARENT_HW_MCK8,
+ PCK_PARENT_HW_MCK9,
+ PCK_PARENT_HW_MAX
+};
+
+/*
+ * Peripheral clock description
+ * @n: clock name
+ * @p: clock parent hw id
+ * @r: clock range values
+ * @id: clock id
+ * @chgp: index in parent array of the changeable parent
+ */
+static struct {
+ const char *n;
+ enum sama7d65_pck_parent_hw_id p;
+ struct clk_range r;
+ u8 chgp;
+ u8 id;
+} sama7d65_periphck[] = {
+ { .n = "pioA_clk", .p = PCK_PARENT_HW_MCK0, .id = 10, },
+ { .n = "securam_clk", .p = PCK_PARENT_HW_MCK0, .id = 17, },
+ { .n = "sfr_clk", .p = PCK_PARENT_HW_MCK7, .id = 18, },
+ { .n = "hsmc_clk", .p = PCK_PARENT_HW_MCK5, .id = 20, },
+ { .n = "xdmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 21, },
+ { .n = "xdmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 22, },
+ { .n = "xdmac2_clk", .p = PCK_PARENT_HW_MCK1, .id = 23, },
+ { .n = "acc_clk", .p = PCK_PARENT_HW_MCK7, .id = 24, },
+ { .n = "aes_clk", .p = PCK_PARENT_HW_MCK6, .id = 26, },
+ { .n = "tzaesbasc_clk", .p = PCK_PARENT_HW_MCK8, .id = 27, },
+ { .n = "asrc_clk", .p = PCK_PARENT_HW_MCK9, .id = 29, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = PCK_PARENT_HW_MCK0, .id = 30, },
+ { .n = "eic_clk", .p = PCK_PARENT_HW_MCK7, .id = 33, },
+ { .n = "flex0_clk", .p = PCK_PARENT_HW_MCK7, .id = 34, },
+ { .n = "flex1_clk", .p = PCK_PARENT_HW_MCK7, .id = 35, },
+ { .n = "flex2_clk", .p = PCK_PARENT_HW_MCK7, .id = 36, },
+ { .n = "flex3_clk", .p = PCK_PARENT_HW_MCK7, .id = 37, },
+ { .n = "flex4_clk", .p = PCK_PARENT_HW_MCK8, .id = 38, },
+ { .n = "flex5_clk", .p = PCK_PARENT_HW_MCK8, .id = 39, },
+ { .n = "flex6_clk", .p = PCK_PARENT_HW_MCK8, .id = 40, },
+ { .n = "flex7_clk", .p = PCK_PARENT_HW_MCK8, .id = 41, },
+ { .n = "flex8_clk", .p = PCK_PARENT_HW_MCK9, .id = 42, },
+ { .n = "flex9_clk", .p = PCK_PARENT_HW_MCK9, .id = 43, },
+ { .n = "flex10_clk", .p = PCK_PARENT_HW_MCK9, .id = 44, },
+ { .n = "gmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 46, },
+ { .n = "gmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 47, },
+ { .n = "gmac0_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 49, },
+ { .n = "gmac1_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 50, },
+ { .n = "icm_clk", .p = PCK_PARENT_HW_MCK5, .id = 53, },
+ { .n = "i2smcc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 54, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 55, .r = { .max = 200000000, }, },
+ { .n = "lcd_clk", .p = PCK_PARENT_HW_MCK3, .id = 56, },
+ { .n = "matrix_clk", .p = PCK_PARENT_HW_MCK5, .id = 57, },
+ { .n = "mcan0_clk", .p = PCK_PARENT_HW_MCK5, .id = 58, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = PCK_PARENT_HW_MCK5, .id = 59, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = PCK_PARENT_HW_MCK5, .id = 60, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = PCK_PARENT_HW_MCK5, .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = PCK_PARENT_HW_MCK5, .id = 62, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 64, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 65, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = PCK_PARENT_HW_MCK7, .id = 66, },
+ { .n = "pit64b1_clk", .p = PCK_PARENT_HW_MCK7, .id = 67, },
+ { .n = "pit64b2_clk", .p = PCK_PARENT_HW_MCK7, .id = 68, },
+ { .n = "pit64b3_clk", .p = PCK_PARENT_HW_MCK8, .id = 69, },
+ { .n = "pit64b4_clk", .p = PCK_PARENT_HW_MCK8, .id = 70, },
+ { .n = "pit64b5_clk", .p = PCK_PARENT_HW_MCK8, .id = 71, },
+ { .n = "pwm_clk", .p = PCK_PARENT_HW_MCK7, .id = 72, },
+ { .n = "qspi0_clk", .p = PCK_PARENT_HW_MCK5, .id = 73, },
+ { .n = "qspi1_clk", .p = PCK_PARENT_HW_MCK5, .id = 74, },
+ { .n = "sdmmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 75, },
+ { .n = "sdmmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 76, },
+ { .n = "sdmmc2_clk", .p = PCK_PARENT_HW_MCK1, .id = 77, },
+ { .n = "sha_clk", .p = PCK_PARENT_HW_MCK6, .id = 78, },
+ { .n = "spdifrx_clk", .p = PCK_PARENT_HW_MCK9, .id = 79, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = PCK_PARENT_HW_MCK9, .id = 80, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = PCK_PARENT_HW_MCK7, .id = 81, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = PCK_PARENT_HW_MCK8, .id = 82, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = PCK_PARENT_HW_MCK8, .id = 83, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = PCK_PARENT_HW_MCK8, .id = 84, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = PCK_PARENT_HW_MCK8, .id = 85, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = PCK_PARENT_HW_MCK5, .id = 86, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = PCK_PARENT_HW_MCK5, .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = PCK_PARENT_HW_MCK5, .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = PCK_PARENT_HW_MCK5, .id = 89, },
+ { .n = "tcpcb_clk", .p = PCK_PARENT_HW_MCK5, .id = 90, },
+ { .n = "tdes_clk", .p = PCK_PARENT_HW_MCK6, .id = 91, },
+ { .n = "trng_clk", .p = PCK_PARENT_HW_MCK6, .id = 92, },
+ { .n = "udphsa_clk", .p = PCK_PARENT_HW_MCK5, .id = 99, },
+ { .n = "udphsb_clk", .p = PCK_PARENT_HW_MCK5, .id = 100, },
+ { .n = "uhphs_clk", .p = PCK_PARENT_HW_MCK5, .id = 101, },
+ { .n = "dsi_clk", .p = PCK_PARENT_HW_MCK3, .id = 103, },
+ { .n = "lvdsc_clk", .p = PCK_PARENT_HW_MCK3, .id = 104, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents (entry formed by PLL components identifiers
+ * (see enum pll_component_id))
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sama7d65_gck[] = {
+ { .n = "adc_gclk",
+ .id = 25,
+ .r = { .max = 100000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 8, 9, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "asrc_gclk",
+ .id = 29,
+ .r = { .max = 200000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex0_gclk",
+ .id = 34,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex1_gclk",
+ .id = 35,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex2_gclk",
+ .id = 36,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex3_gclk",
+ .id = 37,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex4_gclk",
+ .id = 38,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex5_gclk",
+ .id = 39,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex6_gclk",
+ .id = 40,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex7_gclk",
+ .id = 41,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex8_gclk",
+ .id = 42,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex9_gclk",
+ .id = 43,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex10_gclk",
+ .id = 44,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_gclk",
+ .id = 46,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac1_gclk",
+ .id = 47,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac0_tsu_gclk",
+ .id = 49,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac1_tsu_gclk",
+ .id = 50,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc0_gclk",
+ .id = 54,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc1_gclk",
+ .id = 55,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "lcdc_gclk",
+ .id = 56,
+ .r = { .max = 90000000 },
+ .pp_count = 0,
+ .pp_chg_id = INT_MIN,
+ },
+
+ { .n = "mcan0_gclk",
+ .id = 58,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan1_gclk",
+ .id = 59,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan2_gclk",
+ .id = 60,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan3_gclk",
+ .id = 61,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan4_gclk",
+ .id = 62,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "pdmc0_gclk",
+ .id = 64,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9 },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc1_gclk",
+ .id = 65,
+ .r = { .max = 80000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b0_gclk",
+ .id = 66,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b1_gclk",
+ .id = 67,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b2_gclk",
+ .id = 68,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b3_gclk",
+ .id = 69,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b4_gclk",
+ .id = 70,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b5_gclk",
+ .id = 71,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi0_gclk",
+ .id = 73,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi1_gclk",
+ .id = 74,
+ .r = { .max = 266000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "sdmmc0_gclk",
+ .id = 75,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc1_gclk",
+ .id = 76,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc2_gclk",
+ .id = 77,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10 },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "spdifrx_gclk",
+ .id = 79,
+ .r = { .max = 150000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "spdiftx_gclk",
+ .id = 80,
+ .r = { .max = 25000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb0_ch0_gclk",
+ .id = 83,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb1_ch0_gclk",
+ .id = 86,
+ .r = { .max = 67000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "DSI_gclk",
+ .id = 103,
+ .r = {.max = 27000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .pp_mux_table = {5},
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "I3CC_gclk",
+ .id = 105,
+ .r = {.max = 125000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+ .output = { .min = 32768, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3, 5 },
+ .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+ .mask = 0x773,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7d65_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
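Those three layouts are everything the peripheral and generated clock helpers need to compose a PCR write at offset 0x88: select a peripheral by PID, pick a generic-clock source with GCKCSS, and latch the write with the command bit. A hedged sketch of packing one such write, with GENMASK()/BIT() re-derived for illustration and the values taken from the tables above (GCKCSS 8 = baudpll_divpmcck, PID 75 = sdmmc0):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t cmd	= BIT(31);		/* PCR write command */
	uint32_t gckcss	= GENMASK(12, 8);	/* generic clock source */
	uint32_t pid	= GENMASK(6, 0);	/* peripheral id */

	/* Hypothetical: route baudpll (GCKCSS 8) to sdmmc0 (PID 75). */
	uint32_t pcr = cmd | ((8u << 8) & gckcss) | (75u & pid);

	printf("PCR = 0x%08x\n", pcr);		/* 0x8000084b */
	return 0;
}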
+
+static void __init sama7d65_pmc_setup(struct device_node *np)
+{
+ const char *main_xtal_name = "main_xtal";
+ struct pmc_data *sama7d65_pmc;
+ const char *parent_names[11];
+ void **alloc_mem = NULL;
+ int alloc_mem_size = 0;
+ struct regmap *regmap;
+ struct clk_hw *hw, *main_rc_hw, *main_osc_hw, *main_xtal_hw;
+ struct clk_hw *td_slck_hw, *md_slck_hw;
+ static struct clk_parent_data parent_data;
+ struct clk_hw *parent_hws[10];
+ bool bypass;
+ int i, j;
+
+ td_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "td_slck"));
+ md_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "md_slck"));
+ main_xtal_hw = __clk_get_hw(of_clk_get_by_name(np, main_xtal_name));
+
+ if (!td_slck_hw || !md_slck_hw || !main_xtal_hw)
+ return;
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama7d65_pmc = pmc_data_allocate(PMC_INDEX_MAX,
+ nck(sama7d65_systemck),
+ nck(sama7d65_periphck),
+ nck(sama7d65_gck), 8);
+ if (!sama7d65_pmc)
+ return;
+
+ alloc_mem = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sama7d65_mckx) + ARRAY_SIZE(sama7d65_gck)),
+ GFP_KERNEL);
+ if (!alloc_mem)
+ goto err_free;
+
+ main_rc_hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(main_rc_hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ parent_data.name = main_xtal_name;
+ parent_data.fw_name = main_xtal_name;
+ main_osc_hw = at91_clk_register_main_osc(regmap, "main_osc", NULL,
+ &parent_data, bypass);
+ if (IS_ERR(main_osc_hw))
+ goto err_free;
+
+ parent_hws[0] = main_rc_hw;
+ parent_hws[1] = main_osc_hw;
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", NULL, parent_hws, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7d65_plls[i][j].n)
+ continue;
+
+ switch (sama7d65_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ switch (sama7d65_plls[i][j].p) {
+ case SAMA7D65_PLL_PARENT_MAINCK:
+ parent_hw = sama7d65_pmc->chws[PMC_MAIN];
+ break;
+ case SAMA7D65_PLL_PARENT_MAIN_XTAL:
+ parent_hw = main_xtal_hw;
+ break;
+ default:
+ /* Should not happen. */
+ parent_hw = NULL;
+ break;
+ }
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, parent_hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, sama7d65_plls[i][0].hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f,
+ sama7d65_plls[i][j].safe_div);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_plls[i][j].hw = hw;
+ if (sama7d65_plls[i][j].eid)
+ sama7d65_pmc->chws[sama7d65_plls[i][j].eid] = hw;
+ }
+ }
+
+ hw = at91_clk_register_master_div(regmap, "mck0", NULL,
+ sama7d65_plls[PLL_ID_CPU][1].hw,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MCK] = hw;
+ sama7d65_mckx[PCK_PARENT_HW_MCK0].hw = hw;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ for (i = PCK_PARENT_HW_MCK1; i < ARRAY_SIZE(sama7d65_mckx); i++) {
+ u8 num_parents = 3 + sama7d65_mckx[i].ep_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7d65_mckx[i].ep_mux_table,
+ sama7d65_mckx[i].ep_count);
+ for (j = 0; j < sama7d65_mckx[i].ep_count; j++) {
+ u8 pll_id = sama7d65_mckx[i].ep[j].pll_id;
+ u8 pll_compid = sama7d65_mckx[i].ep[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7d65_mckx[i].ep_count);
+
+ hw = at91_clk_sama7g5_register_master(regmap, sama7d65_mckx[i].n,
+ num_parents, NULL, parent_hws,
+ mux_table, &pmc_mckX_lock,
+ sama7d65_mckx[i].id,
+ sama7d65_mckx[i].c,
+ sama7d65_mckx[i].ep_chg_id);
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_mckx[i].hw = hw;
+ if (sama7d65_mckx[i].eid)
+ sama7d65_pmc->chws[sama7d65_mckx[i].eid] = hw;
+ }
+
+ parent_names[0] = "syspll_divpmcck";
+ parent_names[1] = "usbpll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;
+ parent_hws[4] = sama7d65_plls[PLL_ID_DDR][PLL_COMPID_DIV0].hw;
+ parent_hws[5] = sama7d65_plls[PLL_ID_GPU][PLL_COMPID_DIV0].hw;
+ parent_hws[6] = sama7d65_plls[PLL_ID_BAUD][PLL_COMPID_DIV0].hw;
+ parent_hws[7] = sama7d65_plls[PLL_ID_AUDIO][PLL_COMPID_DIV0].hw;
+ parent_hws[8] = sama7d65_plls[PLL_ID_ETH][PLL_COMPID_DIV0].hw;
+
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name, NULL, parent_hws,
+ 9, i,
+ &programmable_layout,
+ sama7d65_prog_mux_table);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama7d65_systemck[i].n,
+ sama7d65_systemck[i].p, NULL,
+ sama7d65_systemck[i].id, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->shws[sama7d65_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_periphck[i].n,
+ NULL,
+ sama7d65_mckx[sama7d65_periphck[i].p].hw,
+ sama7d65_periphck[i].id,
+ &sama7d65_periphck[i].r,
+ sama7d65_periphck[i].chgp ? 0 :
+ INT_MIN, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->phws[sama7d65_periphck[i].id] = hw;
+ }
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_pmc->chws[PMC_MCK1];
+ for (i = 0; i < ARRAY_SIZE(sama7d65_gck); i++) {
+ u8 num_parents = 4 + sama7d65_gck[i].pp_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sama7d65_gck[i].pp_mux_table,
+ sama7d65_gck[i].pp_count);
+ for (j = 0; j < sama7d65_gck[i].pp_count; j++) {
+ u8 pll_id = sama7d65_gck[i].pp[j].pll_id;
+ u8 pll_compid = sama7d65_gck[i].pp[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[4], tmp_parent_hws,
+ sama7d65_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_gck[i].n, NULL,
+ parent_hws, mux_table,
+ num_parents,
+ sama7d65_gck[i].id,
+ &sama7d65_gck[i].r,
+ sama7d65_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->ghws[sama7d65_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7d65_pmc);
+ kfree(alloc_mem);
+
+ return;
+
+err_free:
+ if (alloc_mem) {
+ for (i = 0; i < alloc_mem_size; i++)
+ kfree(alloc_mem[i]);
+ kfree(alloc_mem);
+ }
+
+ kfree(sama7d65_pmc);
+}
+
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sama7d65_pmc, "microchip,sama7d65-pmc", sama7d65_pmc_setup);
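Both PMC setup paths share the same unwinding idiom: every kmalloc_array()'d mux table is also remembered in a scratch pointer array, so a single err_free label can release whatever was allocated before a failure, while the success path frees only the tracking array (the registered clocks keep their tables). A compact userspace model of the pattern, with hypothetical sizes:

#include <stdlib.h>

#define NR_TABLES 4

static int setup(void)
{
	void **alloc_mem;
	int alloc_mem_size = 0;
	int i;

	alloc_mem = malloc(NR_TABLES * sizeof(void *));
	if (!alloc_mem)
		return -1;

	for (i = 0; i < NR_TABLES; i++) {
		unsigned int *mux_table = malloc(8 * sizeof(*mux_table));

		if (!mux_table)
			goto err_free;
		/* ...register a clock that keeps using mux_table... */
		alloc_mem[alloc_mem_size++] = mux_table;
	}

	free(alloc_mem);	/* tables now belong to the clocks */
	return 0;

err_free:
	for (i = 0; i < alloc_mem_size; i++)
		free(alloc_mem[i]);
	free(alloc_mem);
	return -1;
}

int main(void)
{
	return setup();
}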
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 91b5c6f14819..1340c2b00619 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -16,21 +16,6 @@
#include "pmc.h"
-#define SAMA7G5_INIT_TABLE(_table, _count) \
- do { \
- u8 _i; \
- for (_i = 0; _i < (_count); _i++) \
- (_table)[_i] = _i; \
- } while (0)
-
-#define SAMA7G5_FILL_TABLE(_to, _from, _count) \
- do { \
- u8 _i; \
- for (_i = 0; _i < (_count); _i++) { \
- (_to)[_i] = (_from)[_i]; \
- } \
- } while (0)
-
static DEFINE_SPINLOCK(pmc_pll_lock);
static DEFINE_SPINLOCK(pmc_mck0_lock);
static DEFINE_SPINLOCK(pmc_mckX_lock);
@@ -66,6 +51,7 @@ enum pll_component_id {
PLL_COMPID_FRAC,
PLL_COMPID_DIV0,
PLL_COMPID_DIV1,
+ PLL_COMPID_MAX,
};
/*
@@ -116,11 +102,18 @@ static const struct clk_range pll_outputs[] = {
{ .min = 2343750, .max = 1200000000 },
};
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
/* CPU PLL characteristics. */
static const struct clk_pll_characteristics cpu_pll_characteristics = {
.input = { .min = 12000000, .max = 50000000 },
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -128,6 +121,8 @@ static const struct clk_pll_characteristics pll_characteristics = {
.input = { .min = 12000000, .max = 50000000 },
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
+ .core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/*
@@ -165,7 +160,7 @@ static struct sama7g5_pll {
u8 t;
u8 eid;
u8 safe_div;
-} sama7g5_plls[][PLL_ID_MAX] = {
+} sama7g5_plls[][PLL_COMPID_MAX] = {
[PLL_ID_CPU] = {
[PLL_COMPID_FRAC] = {
.n = "cpupll_fracck",
@@ -1038,7 +1033,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_pmc->chws[PMC_MAIN] = hw;
for (i = 0; i < PLL_ID_MAX; i++) {
- for (j = 0; j < 3; j++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
struct clk_hw *parent_hw;
if (!sama7g5_plls[i][j].n)
@@ -1112,17 +1107,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 3);
- SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
- sama7g5_mckx[i].ep_count);
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
+ sama7g5_mckx[i].ep_count);
for (j = 0; j < sama7g5_mckx[i].ep_count; j++) {
u8 pll_id = sama7g5_mckx[i].ep[j].pll_id;
u8 pll_compid = sama7g5_mckx[i].ep[j].pll_compid;
tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
}
- SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
- sama7g5_mckx[i].ep_count);
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7g5_mckx[i].ep_count);
hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
num_parents, NULL, parent_hws, mux_table,
@@ -1208,17 +1203,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 3);
- SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
- sama7g5_gck[i].pp_count);
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
+ sama7g5_gck[i].pp_count);
for (j = 0; j < sama7g5_gck[i].pp_count; j++) {
u8 pll_id = sama7g5_gck[i].pp[j].pll_id;
u8 pll_compid = sama7g5_gck[i].pp[j].pll_compid;
tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
}
- SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
- sama7g5_gck[i].pp_count);
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7g5_gck[i].pp_count);
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sama7g5_pcr_layout,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 7741d8f3dbee..021d1b412af4 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/io.h>
+#include <dt-bindings/clock/at91.h>
+
#define SLOW_CLOCK_FREQ 32768
#define SLOWCK_SW_CYCLES 5
#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
@@ -470,7 +472,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
{
void __iomem *regbase = of_iomap(np, 0);
struct clk_hw_onecell_data *clk_data;
- struct clk_hw *slow_rc, *slow_osc;
+ struct clk_hw *slow_rc, *slow_osc, *hw;
const char *xtal_name;
const struct clk_hw *parent_hws[2];
static struct clk_parent_data parent_data = {
@@ -506,19 +508,19 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
/* MD_SLCK and TD_SLCK. */
clk_data->num = 2;
- clk_data->hws[0] = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck",
- slow_rc,
- 0, 32768);
- if (IS_ERR(clk_data->hws[0]))
+ hw = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck", slow_rc,
+ 0, 32768);
+ if (IS_ERR(hw))
goto clk_data_free;
+ clk_data->hws[SCKC_MD_SLCK] = hw;
parent_hws[0] = slow_rc;
parent_hws[1] = slow_osc;
- clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck",
- parent_hws, 2,
- &at91sam9x60_bits);
- if (IS_ERR(clk_data->hws[1]))
+ hw = at91_clk_register_sam9x5_slow(regbase, "td_slck", parent_hws,
+ 2, &at91sam9x60_bits);
+ if (IS_ERR(hw))
goto unregister_md_slck;
+ clk_data->hws[SCKC_TD_SLCK] = hw;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
@@ -527,9 +529,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
return;
unregister_td_slck:
- at91_clk_unregister_sam9x5_slow(clk_data->hws[1]);
+ at91_clk_unregister_sam9x5_slow(clk_data->hws[SCKC_TD_SLCK]);
unregister_md_slck:
- clk_hw_unregister(clk_data->hws[0]);
+ clk_hw_unregister(clk_data->hws[SCKC_MD_SLCK]);
clk_data_free:
kfree(clk_data);
unregister_slow_osc:
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 2334e6c334cf..6f3e1151b354 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -108,21 +108,21 @@ static unsigned long i2s_pll_recalc_rate(struct clk_hw *hw,
return ((parent_rate / idiv) * fbdiv) / odiv;
}
-static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int i2s_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
- const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(req->best_parent_rate);
int i;
if (!pll_cfg) {
- dev_err(clk->dev, "invalid parent rate=%ld\n", *prate);
+ dev_err(clk->dev, "invalid parent rate=%ld\n", req->best_parent_rate);
return -EINVAL;
}
for (i = 0; pll_cfg[i].rate != 0; i++)
- if (pll_cfg[i].rate == rate)
- return rate;
+ if (pll_cfg[i].rate == req->rate)
+ return 0;
return -EINVAL;
}
@@ -156,7 +156,7 @@ static int i2s_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops i2s_pll_ops = {
.recalc_rate = i2s_pll_recalc_rate,
- .round_rate = i2s_pll_round_rate,
+ .determine_rate = i2s_pll_determine_rate,
.set_rate = i2s_pll_set_rate,
};
@@ -215,7 +215,7 @@ static struct platform_driver i2s_pll_clk_driver = {
.of_match_table = i2s_pll_clk_id,
},
.probe = i2s_pll_clk_probe,
- .remove_new = i2s_pll_clk_remove,
+ .remove = i2s_pll_clk_remove,
};
module_platform_driver(i2s_pll_clk_driver);
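The i2s_pll conversion above, and the axs10x, baikal-t1 and bcm2835 hunks below, are the same mechanical clk_ops migration: .round_rate() reported the rounded rate (or a negative errno) through its return value, whereas .determine_rate() writes it into the clk_rate_request and returns a status. The shape of the transform, on a hypothetical driver that rounds down to 1 kHz steps:

#include <linux/clk-provider.h>

/* Old style, kept only for comparison. */
static long __maybe_unused foo_round_rate(struct clk_hw *hw,
					  unsigned long rate,
					  unsigned long *prate)
{
	return rate - (rate % 1000);
}

/* New style: same policy, reported through the request. */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate -= req->rate % 1000;
	return 0;
}

static const struct clk_ops foo_ops __maybe_unused = {
	.determine_rate	= foo_determine_rate,	/* was .round_rate */
};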
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 6c7a2b62b406..c7ca473ee76c 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -149,8 +149,8 @@ static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int axs10x_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
long best_rate;
@@ -163,11 +163,13 @@ static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops axs10x_pll_ops = {
.recalc_rate = axs10x_pll_recalc_rate,
- .round_rate = axs10x_pll_round_rate,
+ .determine_rate = axs10x_pll_determine_rate,
.set_rate = axs10x_pll_set_rate,
};
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 8d5fc7158f33..849d1f55765f 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -228,15 +228,18 @@ static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
CCU_DIV_CLKDIV_MAX(mask));
}
-static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_var_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long divider;
- divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+ divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate,
+ div->mask);
- return ccu_div_calc_freq(*parent_rate, divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, divider);
+
+ return 0;
}
/*
@@ -308,12 +311,14 @@ static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
return ccu_div_calc_freq(parent_rate, div->divider);
}
-static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_fixed_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
- return ccu_div_calc_freq(*parent_rate, div->divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider);
+
+ return 0;
}
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,14 +539,14 @@ static const struct clk_ops ccu_div_var_gate_to_set_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_fast,
.debug_init = ccu_div_var_debug_init
};
static const struct clk_ops ccu_div_var_nogate_ops = {
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_slow,
.debug_init = ccu_div_var_debug_init
};
@@ -551,7 +556,7 @@ static const struct clk_ops ccu_div_gate_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_gate_debug_init
};
@@ -565,7 +570,7 @@ static const struct clk_ops ccu_div_buf_ops = {
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_fixed_debug_init
};
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
index 13ef28001439..357269f41cdc 100644
--- a/drivers/clk/baikal-t1/ccu-pll.c
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -228,14 +228,16 @@ static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
}
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long nr = 1, nf = 1, od = 1;
- ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+ ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od);
- return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+ req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od);
+
+ return 0;
}
/*
@@ -481,7 +483,7 @@ static const struct clk_ops ccu_pll_gate_to_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_norst,
.debug_init = ccu_pll_debug_init
};
@@ -491,7 +493,7 @@ static const struct clk_ops ccu_pll_straight_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_reset,
.debug_init = ccu_pll_debug_init
};
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index 84555a00f950..17d75e8e2e8f 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -405,7 +405,7 @@ static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < data->divs_num; ++idx) {
if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index fce02ce77347..921b87024feb 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -196,7 +196,7 @@ static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index 3cb235df9d37..e79720e85685 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
static struct platform_driver clk_dvp_driver = {
.probe = clk_dvp_probe,
- .remove_new = clk_dvp_remove,
+ .remove = clk_dvp_remove,
.driver = {
.name = "brcm2711-dvp",
.of_match_table = clk_dvp_dt_ids,
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index fb04734afc80..02215ea79403 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -570,18 +570,23 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
return rate >> A2W_PLL_FRAC_BITS;
}
-static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int bcm2835_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
- rate = clamp(rate, data->min_rate, data->max_rate);
+ req->rate = clamp(req->rate, data->min_rate, data->max_rate);
- bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
+ bcm2835_pll_choose_ndiv_and_fdiv(req->rate, req->best_parent_rate,
+ &ndiv, &fdiv);
- return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
+ req->rate = bcm2835_pll_rate_from_divisors(req->best_parent_rate,
+ ndiv, fdiv,
+ 1);
+
+ return 0;
}
static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
@@ -783,7 +788,7 @@ static const struct clk_ops bcm2835_pll_clk_ops = {
.unprepare = bcm2835_pll_off,
.recalc_rate = bcm2835_pll_get_rate,
.set_rate = bcm2835_pll_set_rate,
- .round_rate = bcm2835_pll_round_rate,
+ .determine_rate = bcm2835_pll_determine_rate,
.debug_init = bcm2835_pll_debug_init,
};
@@ -1550,7 +1555,7 @@ static const char *const bcm2835_clock_osc_parents[] = {
.parents = bcm2835_clock_osc_parents, \
__VA_ARGS__)
-/* main peripherial parent mux */
+/* main peripheral parent mux */
static const char *const bcm2835_clock_per_parents[] = {
"gnd",
"xosc",
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
index 84f2af736ee8..b2fc05b60783 100644
--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -59,7 +59,7 @@ static unsigned long bcm53573_ilp_recalc_rate(struct clk_hw *hw,
/*
* At minimum we should loop for a bit to let hardware do the
* measurement. This isn't very accurate however, so for a better
- * precision lets try getting 20 different values for and use average.
+ * precision let's try getting 20 different values and use the average.
*/
while (num < 20) {
regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &cur_val);
@@ -112,7 +112,7 @@ static void bcm53573_ilp_init(struct device_node *np)
goto err_free_ilp;
}
- ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
+ ilp->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(ilp->regmap)) {
err = PTR_ERR(ilp->regmap);
goto err_free_ilp;
diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
index 36c7b302e396..d6d857474436 100644
--- a/drivers/clk/bcm/clk-bcm63xx-gate.c
+++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
@@ -567,7 +567,7 @@ static const struct of_device_id clk_bcm63xx_dt_ids[] = {
static struct platform_driver clk_bcm63xx = {
.probe = clk_bcm63xx_probe,
- .remove_new = clk_bcm63xx_remove,
+ .remove = clk_bcm63xx_remove,
.driver = {
.name = "bcm63xx-clock",
.of_match_table = clk_bcm63xx_dt_ids,
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index dcacf55c55ae..83ec13da9b2e 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -98,22 +98,27 @@ static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
return clk->rate;
}
-static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int iproc_asiu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div;
- if (rate == 0 || *parent_rate == 0)
+ if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
- if (rate == *parent_rate)
- return *parent_rate;
+ if (req->rate == req->best_parent_rate)
+ return 0;
- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div < 2)
- return *parent_rate;
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ if (div < 2) {
+ req->rate = req->best_parent_rate;
- return *parent_rate / div;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -168,7 +173,7 @@ static const struct clk_ops iproc_asiu_ops = {
.enable = iproc_asiu_clk_enable,
.disable = iproc_asiu_clk_disable,
.recalc_rate = iproc_asiu_clk_recalc_rate,
- .round_rate = iproc_asiu_clk_round_rate,
+ .determine_rate = iproc_asiu_clk_determine_rate,
.set_rate = iproc_asiu_clk_set_rate,
};
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index ec5749e301ba..0171e6b2bfca 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/string_choices.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -52,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
-/*
- * Build a scaled divider value as close as possible to the
- * given whole part (div_value) and fractional part (expressed
- * in billionths).
- */
-u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
-{
- u64 combined;
-
- BUG_ON(!div_value);
- BUG_ON(billionths >= BILLION);
-
- combined = (u64)div_value * BILLION + billionths;
- combined <<= div->u.s.frac_width;
-
- return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
-}
-
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
@@ -502,7 +485,7 @@ static int clk_gate(struct ccu_data *ccu, const char *name,
return 0;
pr_err("%s: failed to %s gate for %s\n", __func__,
- enable ? "enable" : "disable", name);
+ str_enable_disable(enable), name);
return -EIO;
}
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index e09655024ac2..348a3454ce40 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
extern u64 scaled_div_max(struct bcm_clk_div *div);
-extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
- u32 billionths);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
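
The clk-kona change swaps the open-coded ternary for str_enable_disable() from the newly included linux/string_choices.h. A one-line sketch of that helper family, purely for illustration:

    #include <linux/string_choices.h>

    pr_info("gate %s\n", str_enable_disable(enable));       /* "enable" / "disable" */
    pr_info("gate is %s\n", str_enabled_disabled(enable));  /* "enabled" / "disabled" */

Both helpers exist in string_choices.h; they differ only in tense.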
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 4d411408e4af..1a9162f0ae31 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -34,6 +34,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
[RPI_FIRMWARE_VEC_CLK_ID] = "vec",
+ [RPI_FIRMWARE_DISP_CLK_ID] = "disp",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -56,11 +57,19 @@ struct raspberrypi_clk_data {
struct raspberrypi_clk *rpi;
};
+static inline
+const struct raspberrypi_clk_data *clk_hw_to_data(const struct clk_hw *hw)
+{
+ return container_of(hw, struct raspberrypi_clk_data, hw);
+}
+
struct raspberrypi_clk_variant {
bool export;
char *clkdev;
unsigned long min_rate;
bool minimize;
+ bool maximize;
+ u32 flags;
};
static struct raspberrypi_clk_variant
@@ -68,6 +77,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_ARM_CLK_ID] = {
.export = true,
.clkdev = "cpu0",
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_CORE_CLK_ID] = {
.export = true,
@@ -83,6 +93,12 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* always use the minimum the drivers will let us.
*/
.minimize = true,
+
+ /*
+ * It should never be disabled as it drives the bus for
+ * everything else.
+ */
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_M2MC_CLK_ID] = {
.export = true,
@@ -108,21 +124,46 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* drivers will let us.
*/
.minimize = true,
+
+ /*
+ * As mentioned above, if this clock is disabled during boot,
+ * the firmware will skip the HSM initialization, resulting
+ * in a bus lockup. Therefore, make sure it's enabled during
+ * boot; after that, it can be enabled/disabled by the driver.
+ */
+ .flags = CLK_IGNORE_UNUSED,
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
+ .maximize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
+ .minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ .flags = CLK_IS_CRITICAL,
+ },
+ [RPI_FIRMWARE_ISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
+ .minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_DISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
};
@@ -153,7 +194,6 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
struct raspberrypi_firmware_prop msg = {
.id = cpu_to_le32(data->id),
.val = cpu_to_le32(*val),
- .disable_turbo = cpu_to_le32(1),
};
int ret;
@@ -168,16 +208,18 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_STATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s state: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT);
}
@@ -186,16 +228,18 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return val;
}
@@ -203,8 +247,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 _rate = rate;
int ret;
@@ -221,8 +264,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk_variant *variant = data->variant;
/*
@@ -244,7 +286,41 @@ static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
return 0;
}
+static int raspberrypi_fw_prepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = RPI_FIRMWARE_STATE_ENABLE_BIT;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to on: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+}
+
+static void raspberrypi_fw_unprepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to off: %d\n",
+ clk_hw_get_name(hw), ret);
+}
+
static const struct clk_ops raspberrypi_firmware_clk_ops = {
+ .prepare = raspberrypi_fw_prepare,
+ .unprepare = raspberrypi_fw_unprepare,
.is_prepared = raspberrypi_fw_is_prepared,
.recalc_rate = raspberrypi_fw_get_rate,
.determine_rate = raspberrypi_fw_dumb_determine_rate,
@@ -271,8 +347,10 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
"fw-clk-%s",
rpi_firmware_clk_names[id]);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
+ init.flags = variant->flags | CLK_GET_RATE_NOCACHE;
data->hw.init = &init;
@@ -309,6 +387,9 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
}
}
+ if (variant->maximize)
+ variant->min_rate = max_rate;
+
if (variant->min_rate) {
unsigned long rate;
@@ -458,11 +539,10 @@ static struct platform_driver raspberrypi_clk_driver = {
.of_match_table = raspberrypi_clk_match,
},
.probe = raspberrypi_clk_probe,
- .remove_new = raspberrypi_clk_remove,
+ .remove = raspberrypi_clk_remove,
};
module_platform_driver(raspberrypi_clk_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:raspberrypi-clk");
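
With .prepare/.unprepare now backed by RPI_FIRMWARE_SET_CLOCK_STATE, the firmware clocks can be gated through the ordinary consumer API rather than only having their state queried. A hedged consumer-side sketch; the "v3d" clock-names entry is an assumption for illustration:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /* Prepare now reaches raspberrypi_fw_prepare() and turns the
             * clock on in the firmware, not just in software state. */
            struct clk *clk = devm_clk_get_enabled(&pdev->dev, "v3d");

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return 0;
    }

The CLK_IS_CRITICAL and CLK_IGNORE_UNUSED variant flags above exist precisely because gating is now real: clocks the firmware depends on must not be dropped by the unused-clock sweep.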
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
index aa89b4c9464e..79f3d37a0ee0 100644
--- a/drivers/clk/berlin/berlin2-avpll.c
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -319,7 +319,7 @@ berlin2_avpll_channel_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
/*
* AV3 divider start at VCO_CTRL14, bit 7; each 4 bits wide.
- * AV2/AV3 form a fractional divider, where only specfic values for AV3
+ * AV2/AV3 form a fractional divider, where only specific values for AV3
* are allowed. AV3 != 0 divides by AV2/2, AV3=0 is bypass.
*/
if (ch->index < 6) {
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
index 39472a51530a..d3ced4a0f029 100644
--- a/drivers/clk/clk-apple-nco.c
+++ b/drivers/clk/clk-apple-nco.c
@@ -212,13 +212,15 @@ static unsigned long applnco_recalc_rate(struct clk_hw *hw,
((u64) div) * incbase + inc1);
}
-static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int applnco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
- unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
+ unsigned long lo = req->best_parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
+ unsigned long hi = req->best_parent_rate / COARSE_DIV_OFFSET;
- return clamp(rate, lo, hi);
+ req->rate = clamp(req->rate, lo, hi);
+
+ return 0;
}
static int applnco_enable(struct clk_hw *hw)
@@ -246,7 +248,7 @@ static void applnco_disable(struct clk_hw *hw)
static const struct clk_ops applnco_ops = {
.set_rate = applnco_set_rate,
.recalc_rate = applnco_recalc_rate,
- .round_rate = applnco_round_rate,
+ .determine_rate = applnco_determine_rate,
.enable = applnco_enable,
.disable = applnco_disable,
.is_enabled = applnco_is_enabled,
@@ -297,6 +299,9 @@ static int applnco_probe(struct platform_device *pdev)
memset(&init, 0, sizeof(init));
init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%s-%d", np->name, i);
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &applnco_ops;
init.parent_data = &pdata;
init.num_parents = 1;
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 3432c801f1bd..595cfa533fb9 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -92,8 +92,8 @@ static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
{ CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
{ CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
- /* i2s has two deviders: one for only external mclk and internal
- * devider for all clks. */
+ /* i2s has two dividers: one for the external mclk only and an
+ * internal divider for all clks. */
{ CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
{ CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
{ CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index faf88324f7b1..114afc13d640 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -92,7 +92,7 @@ static u8 soc_rev;
*
* There are some gates that do not have an associated reset; these are
* handled by using -1 as the index for the reset, and the consumer must
- * explictly assert/deassert reset lines as required.
+ * explicitly assert/deassert reset lines as required.
*
* Clocks marked with CLK_IS_CRITICAL:
*
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index bf4d8ddc93ae..fa5ccef73e60 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -6,13 +6,17 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
-#include <linux/platform_device.h>
+#include <linux/adi-axi-common.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/slab.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/module.h>
-#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define AXI_CLKGEN_V2_REG_RESET 0x40
#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
@@ -27,6 +31,9 @@
#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+#define ADI_CLKGEN_REG_FPGA_VOLTAGE 0x0140
+#define ADI_CLKGEN_INFO_FPGA_VOLTAGE(val) ((val) & GENMASK(15, 0))
+
#define MMCM_REG_CLKOUT5_2 0x07
#define MMCM_REG_CLKOUT0_1 0x08
#define MMCM_REG_CLKOUT0_2 0x09
@@ -89,7 +96,7 @@ static uint32_t axi_clkgen_lookup_filter(unsigned int m)
}
}
-static const uint32_t axi_clkgen_lock_table[] = {
+static const u32 axi_clkgen_lock_table[] = {
0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
@@ -101,7 +108,7 @@ static const uint32_t axi_clkgen_lock_table[] = {
0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
};
-static uint32_t axi_clkgen_lookup_lock(unsigned int m)
+static u32 axi_clkgen_lookup_lock(unsigned int m)
{
if (m < ARRAY_SIZE(axi_clkgen_lock_table))
return axi_clkgen_lock_table[m];
@@ -117,14 +124,15 @@ static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
- .fpfd_max = 300000,
+ .fpfd_max = 450000,
.fvco_min = 600000,
.fvco_max = 1200000,
};
static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
- unsigned long fin, unsigned long fout,
- unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
+ unsigned long fin, unsigned long fout,
+ unsigned int *best_d, unsigned int *best_m,
+ unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
@@ -140,15 +148,15 @@ static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
*best_m = 0;
*best_dout = 0;
- d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
- d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
+ d_min = max(DIV_ROUND_UP(fin, limits->fpfd_max), 1);
+ d_max = min(fin / limits->fpfd_min, 80);
again:
fvco_min_fract = limits->fvco_min << fract_shift;
fvco_max_fract = limits->fvco_max << fract_shift;
- m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
- m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
+ m_min = max(DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
+ m_max = min(fvco_max_fract * d_max / fin, 64 << fract_shift);
for (m = m_min; m <= m_max; m++) {
_d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max_fract));
@@ -171,7 +179,7 @@ again:
}
}
- /* Lets see if we find a better setting in fractional mode */
+ /* Let's see if we find a better setting in fractional mode */
if (fract_shift == 0) {
fract_shift = 3;
goto again;
@@ -191,9 +199,9 @@ struct axi_clkgen_div_params {
};
static void axi_clkgen_calc_clk_params(unsigned int divider,
- unsigned int frac_divider, struct axi_clkgen_div_params *params)
+ unsigned int frac_divider,
+ struct axi_clkgen_div_params *params)
{
-
memset(params, 0x0, sizeof(*params));
if (divider == 1) {
@@ -221,7 +229,7 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
if (params->edge == 0 || frac_divider == 1)
params->low--;
if (((params->edge == 0) ^ (frac_divider == 1)) ||
- (divider == 2 && frac_divider == 1))
+ (divider == 2 && frac_divider == 1))
params->frac_wf_f = 1;
params->frac_phase = params->edge * 4 + frac_divider / 2;
@@ -229,13 +237,13 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val)
+ unsigned int reg, unsigned int val)
{
writel(val, axi_clkgen->base + reg);
}
static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
*val = readl(axi_clkgen->base + reg);
}
@@ -256,7 +264,7 @@ static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
}
static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
int ret;
@@ -280,7 +288,8 @@ static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
}
static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
+ unsigned int reg, unsigned int val,
+ unsigned int mask)
{
unsigned int reg_val = 0;
int ret;
@@ -301,8 +310,7 @@ static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
return 0;
}
-static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen, bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
@@ -318,31 +326,31 @@ static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
}
static void axi_clkgen_set_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2, unsigned int reg3,
- struct axi_clkgen_div_params *params)
+ unsigned int reg1, unsigned int reg2,
+ unsigned int reg3,
+ struct axi_clkgen_div_params *params)
{
axi_clkgen_mmcm_write(axi_clkgen, reg1,
- (params->high << 6) | params->low, 0xefff);
+ (params->high << 6) | params->low, 0xefff);
axi_clkgen_mmcm_write(axi_clkgen, reg2,
- (params->frac << 12) | (params->frac_en << 11) |
- (params->frac_wf_r << 10) | (params->edge << 7) |
- (params->nocount << 6), 0x7fff);
+ (params->frac << 12) | (params->frac_en << 11) |
+ (params->frac_wf_r << 10) | (params->edge << 7) |
+ (params->nocount << 6), 0x7fff);
if (reg3 != 0) {
axi_clkgen_mmcm_write(axi_clkgen, reg3,
- (params->frac_phase << 11) | (params->frac_wf_f << 10), 0x3c00);
+ (params->frac_phase << 11) | (params->frac_wf_f << 10),
+ 0x3c00);
}
}
-static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
- unsigned long rate, unsigned long parent_rate)
+static int axi_clkgen_set_rate(struct clk_hw *clk_hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
- uint32_t power = 0;
- uint32_t filter;
- uint32_t lock;
+ u32 power = 0, filter, lock;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
@@ -362,22 +370,22 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
axi_clkgen_calc_clk_params(dout >> 3, dout & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLKOUT0_1, MMCM_REG_CLKOUT0_2,
- MMCM_REG_CLKOUT5_2, &params);
+ MMCM_REG_CLKOUT5_2, &params);
axi_clkgen_calc_clk_params(d, 0, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
- (params.edge << 13) | (params.nocount << 12) |
- (params.high << 6) | params.low, 0x3fff);
+ (params.edge << 13) | (params.nocount << 12) |
+ (params.high << 6) | params.low, 0x3fff);
axi_clkgen_calc_clk_params(m >> 3, m & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLK_FB1, MMCM_REG_CLK_FB2,
- MMCM_REG_CLKOUT6_2, &params);
+ MMCM_REG_CLKOUT6_2, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
- (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
+ (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
- (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
@@ -406,7 +414,7 @@ static int axi_clkgen_determine_rate(struct clk_hw *hw,
}
static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2)
+ unsigned int reg1, unsigned int reg2)
{
unsigned int val1, val2;
unsigned int div;
@@ -433,7 +441,7 @@ static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
@@ -441,9 +449,9 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int val;
dout = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLKOUT0_1,
- MMCM_REG_CLKOUT0_2);
+ MMCM_REG_CLKOUT0_2);
m = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLK_FB1,
- MMCM_REG_CLK_FB2);
+ MMCM_REG_CLK_FB2);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &val);
if (val & MMCM_CLK_DIV_NOCOUNT)
@@ -495,6 +503,54 @@ static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
return parent;
}
+static int axi_clkgen_setup_limits(struct axi_clkgen *axi_clkgen,
+ struct device *dev)
+{
+ unsigned int tech, family, speed_grade, reg_value;
+
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_FPGA_INFO, &reg_value);
+ tech = ADI_AXI_INFO_FPGA_TECH(reg_value);
+ family = ADI_AXI_INFO_FPGA_FAMILY(reg_value);
+ speed_grade = ADI_AXI_INFO_FPGA_SPEED_GRADE(reg_value);
+
+ axi_clkgen->limits.fpfd_min = 10000;
+ axi_clkgen->limits.fvco_min = 600000;
+
+ switch (speed_grade) {
+ case ADI_AXI_FPGA_SPEED_1 ... ADI_AXI_FPGA_SPEED_1LV:
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ break;
+ case ADI_AXI_FPGA_SPEED_2 ... ADI_AXI_FPGA_SPEED_2LV:
+ axi_clkgen->limits.fvco_max = 1440000;
+ axi_clkgen->limits.fpfd_max = 500000;
+ if (family == ADI_AXI_FPGA_FAMILY_KINTEX || family == ADI_AXI_FPGA_FAMILY_ARTIX) {
+ axi_clkgen_read(axi_clkgen, ADI_CLKGEN_REG_FPGA_VOLTAGE,
+ &reg_value);
+ if (ADI_CLKGEN_INFO_FPGA_VOLTAGE(reg_value) < 950) {
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ }
+ }
+ break;
+ case ADI_AXI_FPGA_SPEED_3:
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fpfd_max = 550000;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unknown speed grade %d\n",
+ speed_grade);
+ }
+
+ /* Overwrite vco limits for ultrascale+ */
+ if (tech == ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS) {
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fvco_min = 800000;
+ }
+
+ return 0;
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.determine_rate = axi_clkgen_determine_rate,
@@ -509,9 +565,11 @@ static int axi_clkgen_probe(struct platform_device *pdev)
{
const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
+ unsigned int pcore_version;
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
+ struct clk *axi_clk;
unsigned int i;
int ret;
@@ -528,8 +586,24 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return PTR_ERR(axi_clkgen->base);
init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
- if (init.num_parents < 1 || init.num_parents > 2)
- return -EINVAL;
+
+ axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
+ if (!IS_ERR(axi_clk)) {
+ if (init.num_parents < 2 || init.num_parents > 3)
+ return -EINVAL;
+
+ init.num_parents -= 1;
+ } else {
+ /*
+ * Legacy... So that old DTs which do not have clock-names still
+ * work. In this case we don't explicitly enable the AXI bus
+ * clock.
+ */
+ if (PTR_ERR(axi_clk) != -ENOENT)
+ return PTR_ERR(axi_clk);
+ if (init.num_parents < 1 || init.num_parents > 2)
+ return -EINVAL;
+ }
for (i = 0; i < init.num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
@@ -537,11 +611,20 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return -EINVAL;
}
- memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_VERSION, &pcore_version);
+
+ if (ADI_AXI_PCORE_VER_MAJOR(pcore_version) > 0x04) {
+ ret = axi_clkgen_setup_limits(axi_clkgen, &pdev->dev);
+ if (ret)
+ return ret;
+ } else {
+ memcpy(&axi_clkgen->limits, dflt_limits,
+ sizeof(axi_clkgen->limits));
+ }
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
- &clk_name);
+ &clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
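
The divider search bounds in axi_clkgen_calc_params() come directly from the PFD window. A worked example under an assumed 125 MHz reference (rates in kHz, as the driver stores them), using the relaxed Zynq limits:

    /* fin = 125000 kHz, fpfd_min = 10000, fpfd_max = 450000 */
    d_min = max(DIV_ROUND_UP(125000, 450000), 1);  /* = 1  */
    d_max = min(125000 / 10000, 80);               /* = 12 */

so every input divider from 1 to 12 is scanned, and for each d the m range is derived the same way from the fvco window.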
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 4a3462ee8f3e..3823383f3fa6 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -529,7 +529,6 @@ static const struct regmap_config axmclk_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1fffc,
- .fast_io = true,
};
static const struct of_device_id axmclk_match_table[] = {
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 002f7360b1c6..dac190bc6e19 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -608,8 +608,8 @@ static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int bm1880_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
@@ -621,13 +621,18 @@ static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
- return divider_ro_round_rate(hw, rate, prate, div->table,
- div->width, div->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ div->table,
+ div->width, div->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, div->table,
- div->width, div->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div->table, div->width, div->flags);
+
+ return 0;
}
static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -665,7 +670,7 @@ static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops bm1880_clk_div_ops = {
.recalc_rate = bm1880_clk_div_recalc_rate,
- .round_rate = bm1880_clk_div_round_rate,
+ .determine_rate = bm1880_clk_div_determine_rate,
.set_rate = bm1880_clk_div_set_rate,
};
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index dd3d42d9ad86..a495d313b02f 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -183,8 +183,8 @@ static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce706_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul, div;
@@ -192,9 +192,9 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
- __func__, rate, *parent_rate);
+ __func__, req->rate, req->best_parent_rate);
- rational_best_approximation(rate, *parent_rate,
+ rational_best_approximation(req->rate, req->best_parent_rate,
CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
&mul, &div);
hwd->mul = mul;
@@ -204,9 +204,11 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
- res = (u64)*parent_rate * hwd->mul;
+ res = (u64)req->best_parent_rate * hwd->mul;
do_div(res, hwd->div);
- return res;
+ req->rate = res;
+
+ return 0;
}
static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -251,7 +253,7 @@ static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cdce706_pll_ops = {
.recalc_rate = cdce706_pll_recalc_rate,
- .round_rate = cdce706_pll_round_rate,
+ .determine_rate = cdce706_pll_determine_rate,
.set_rate = cdce706_pll_set_rate,
};
@@ -678,7 +680,7 @@ MODULE_DEVICE_TABLE(of, cdce706_dt_match);
#endif
static const struct i2c_device_id cdce706_id[] = {
- { "cdce706", 0 },
+ { "cdce706" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cdce706_id);
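
cdce706_pll_determine_rate() keeps the old selection logic, which leans on rational_best_approximation() from linux/rational.h to pick the N/M pair. A self-contained sketch of that helper on neutral numbers (approximating pi, purely for illustration):

    #include <linux/rational.h>

    unsigned long n, m;

    /* Best n/m with n, m <= 1000 such that n/m ~= 314159/100000;
     * yields the classic 355/113 ~= 3.1415929. */
    rational_best_approximation(314159, 100000, 1000, 1000, &n, &m);

In the driver the arguments are req->rate, req->best_parent_rate and the CDCE706_PLL_N_MAX/CDCE706_PLL_M_MAX bounds, so parent_rate * n / m approximates the requested rate.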
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index e48be7a6c0e2..0b2ad21e6e4d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -128,13 +128,15 @@ static void cdce925_pll_find_rate(unsigned long rate,
}
}
-static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u16 n, m;
- cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
- return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
+ cdce925_pll_find_rate(req->rate, req->best_parent_rate, &n, &m);
+ req->rate = (long)cdce925_pll_calculate_rate(req->best_parent_rate, n, m);
+
+ return 0;
}
static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -266,7 +268,7 @@ static const struct clk_ops cdce925_pll_ops = {
.prepare = cdce925_pll_prepare,
.unprepare = cdce925_pll_unprepare,
.recalc_rate = cdce925_pll_recalc_rate,
- .round_rate = cdce925_pll_round_rate,
+ .determine_rate = cdce925_pll_determine_rate,
.set_rate = cdce925_pll_set_rate,
};
@@ -420,20 +422,23 @@ static unsigned long cdce925_clk_best_parent_rate(
return rate * pdiv_best;
}
-static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_calc_divider(req->rate, l_parent_rate);
- if (l_parent_rate / divider != rate) {
- l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
- divider = cdce925_calc_divider(rate, l_parent_rate);
- *parent_rate = l_parent_rate;
+ if (l_parent_rate / divider != req->rate) {
+ l_parent_rate = cdce925_clk_best_parent_rate(hw, req->rate);
+ divider = cdce925_calc_divider(req->rate, l_parent_rate);
+ req->best_parent_rate = l_parent_rate;
}
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -451,7 +456,7 @@ static const struct clk_ops cdce925_clk_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_round_rate,
+ .determine_rate = cdce925_clk_determine_rate,
.set_rate = cdce925_clk_set_rate,
};
@@ -473,14 +478,17 @@ static u16 cdce925_y1_calc_divider(unsigned long rate,
return (u16)divider;
}
-static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_y1_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_y1_calc_divider(req->rate, l_parent_rate);
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -498,7 +506,7 @@ static const struct clk_ops cdce925_clk_y1_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_y1_round_rate,
+ .determine_rate = cdce925_clk_y1_determine_rate,
.set_rate = cdce925_clk_y1_set_rate,
};
@@ -601,7 +609,7 @@ static int cdce925_regulator_enable(struct device *dev, const char *name)
/* The CDCE925 uses a funky way to read/write registers. Bulk mode is
* just weird, so just use the single byte mode exclusively. */
-static struct regmap_bus regmap_cdce925_bus = {
+static const struct regmap_bus regmap_cdce925_bus = {
.write = cdce925_regmap_i2c_write,
.read = cdce925_regmap_i2c_read,
};
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index f8417ee2961a..402ab74d9bfb 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -99,7 +99,7 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
*/
tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
/* Timer2 in prescale mode.
- * Value writen is automatically re-loaded when
+ * Value written is automatically re-loaded when
* the counter underflows.
*/
tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 1a4e6340f95c..303a0bb26e54 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/printk.h>
+#include <linux/slab.h>
static int __set_clk_parents(struct device_node *node, bool clk_supplier)
{
@@ -81,13 +82,44 @@ err:
static int __set_clk_rates(struct device_node *node, bool clk_supplier)
{
struct of_phandle_args clkspec;
- struct property *prop;
- const __be32 *cur;
- int rc, index = 0;
+ int rc, count, count_64, index;
struct clk *clk;
- u32 rate;
+ u64 *rates_64 __free(kfree) = NULL;
+ u32 *rates __free(kfree) = NULL;
+
+ count = of_property_count_u32_elems(node, "assigned-clock-rates");
+ count_64 = of_property_count_u64_elems(node, "assigned-clock-rates-u64");
+ if (count_64 > 0) {
+ count = count_64;
+ rates_64 = kcalloc(count, sizeof(*rates_64), GFP_KERNEL);
+ if (!rates_64)
+ return -ENOMEM;
+
+ rc = of_property_read_u64_array(node,
+ "assigned-clock-rates-u64",
+ rates_64, count);
+ } else if (count > 0) {
+ rates = kcalloc(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(node, "assigned-clock-rates",
+ rates, count);
+ } else {
+ return 0;
+ }
+
+ if (rc)
+ return rc;
+
+ for (index = 0; index < count; index++) {
+ unsigned long rate;
+
+ if (rates_64)
+ rate = rates_64[index];
+ else
+ rate = rates[index];
- of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) {
if (rate) {
rc = of_parse_phandle_with_args(node, "assigned-clocks",
"#clock-cells", index, &clkspec);
@@ -114,12 +146,11 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
+ pr_err("clk: couldn't set %s clk rate to %lu (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
}
- index++;
}
return 0;
}
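
The rewritten __set_clk_rates() also adopts the scope-based cleanup idiom: rates and rates_64 are tagged __free(kfree), so every early return frees them without explicit kfree() calls. A minimal standalone sketch of the idiom, assuming a hypothetical parse_values() helper:

    #include <linux/slab.h>

    static int parse_values(int count)
    {
            u32 *buf __free(kfree) = kcalloc(count, sizeof(*buf), GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            /* ... fill and consume buf; it is freed automatically when it
             * goes out of scope, on this and every other return path. */
            return 0;
    }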
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index 35cb93ad298a..8800472ba63f 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -305,15 +305,19 @@ static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
}
-static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cs2000_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cs2000_priv *priv = hw_to_priv(hw);
u32 ratio;
- ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
+ ratio = cs2000_rate_to_ratio(req->best_parent_rate, req->rate,
+ priv->lf_ratio);
- return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
+ req->rate = cs2000_ratio_to_rate(ratio, req->best_parent_rate,
+ priv->lf_ratio);
+
+ return 0;
}
static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
@@ -430,7 +434,7 @@ static u8 cs2000_get_parent(struct clk_hw *hw)
static const struct clk_ops cs2000_ops = {
.get_parent = cs2000_get_parent,
.recalc_rate = cs2000_recalc_rate,
- .round_rate = cs2000_round_rate,
+ .determine_rate = cs2000_determine_rate,
.set_rate = cs2000_set_rate,
.prepare = cs2000_enable,
.unprepare = cs2000_disable,
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 90e6078fb6e1..5368d92d9b39 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -99,6 +99,34 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = __devm_clk_get(dev, id, clk_get_optional, NULL,
+ clk_disable_unprepare);
+ if (IS_ERR(clk))
+ return ERR_CAST(clk);
+
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ goto out_put_clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_put_clk;
+
+ return clk;
+
+out_put_clk:
+ devm_clk_put(dev, clk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled_with_rate);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
@@ -190,8 +218,8 @@ static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
clk_bulk_put_all(devres->num_clks, devres->clks);
}
-int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
- struct clk_bulk_data **clks)
+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks)
{
struct clk_bulk_devres *devres;
int ret;
@@ -216,11 +244,12 @@ int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
} else {
clk_bulk_put_all(devres->num_clks, devres->clks);
devres_free(devres);
+ return ret;
}
- return ret;
+ return devres->num_clks;
}
-EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);
static int devm_clk_match(struct device *dev, void *res, void *data)
{
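
devm_clk_get_optional_enabled_with_rate() collapses the common get/set_rate/prepare_enable dance into one devres-managed call. A hedged probe-time sketch; the "ref" id and the 25 MHz rate are assumptions for illustration:

    struct clk *clk;

    clk = devm_clk_get_optional_enabled_with_rate(&pdev->dev, "ref",
                                                  25000000);
    if (IS_ERR(clk))
            return PTR_ERR(clk);

    /* clk is either NULL (clock absent, optional semantics) or a clock
     * already set to 25 MHz and enabled; teardown is devm-managed. */

On clk_set_rate() or clk_prepare_enable() failure the helper drops its reference and returns the error wrapped in ERR_PTR(), so the single IS_ERR() check covers all paths.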
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a2c2b5203b0a..2601b6155afb 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -72,6 +72,8 @@ static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
return clk_div_mask(width);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << clk_div_mask(width);
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return 2 * (clk_div_mask(width) + 1);
if (table)
return _get_table_maxdiv(table, width);
return clk_div_mask(width) + 1;
@@ -97,6 +99,8 @@ static unsigned int _get_div(const struct clk_div_table *table,
return 1 << val;
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return val ? val : clk_div_mask(width) + 1;
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return 2 * (val + 1);
if (table)
return _get_table_div(table, val);
return val + 1;
@@ -122,6 +126,8 @@ static unsigned int _get_val(const struct clk_div_table *table,
return __ffs(div);
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return (div == clk_div_mask(width) + 1) ? 0 : div;
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return (div >> 1) - 1;
if (table)
return _get_table_val(table, div);
return div - 1;
@@ -425,27 +431,6 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- /* if read only, just return current value */
- if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- u32 val;
-
- val = clk_div_readl(divider) >> divider->shift;
- val &= clk_div_mask(divider->width);
-
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
- }
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -521,7 +506,6 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
@@ -529,7 +513,6 @@ EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
@@ -538,7 +521,8 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_divider *div;
@@ -610,8 +594,8 @@ EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock)
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw *hw;
@@ -664,7 +648,8 @@ struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
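
For reference, the new CLK_DIVIDER_EVEN_INTEGERS flag maps the register field linearly onto the even divisors. With a 3-bit field (mask = 7) the three helpers above give:

    /*
     * _get_div():    div = 2 * (val + 1)   ->  val 0..7 ==> div 2, 4, ..., 16
     * _get_val():    val = (div >> 1) - 1  ->  div 2 ==> 0, div 16 ==> 7
     * _get_maxdiv(): 2 * (mask + 1) = 16
     */

i.e. odd divisors are simply not representable, which is the point of the flag.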
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index ccc394692671..08cc8e5acf43 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -3,9 +3,16 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
#include <dt-bindings/clock/en7523-clk.h>
+#include <dt-bindings/reset/airoha,en7523-reset.h>
+#include <dt-bindings/reset/airoha,en7581-reset.h>
+
+#define RST_NR_PER_BANK 32
#define REG_PCI_CONTROL 0x88
#define REG_PCI_CONTROL_PERSTOUT BIT(29)
@@ -27,18 +34,14 @@
#define REG_RESET_CONTROL_PCIE1 BIT(27)
#define REG_RESET_CONTROL_PCIE2 BIT(26)
/* EN7581 */
-#define REG_PCIE0_MEM 0x00
-#define REG_PCIE0_MEM_MASK 0x04
-#define REG_PCIE1_MEM 0x08
-#define REG_PCIE1_MEM_MASK 0x0c
-#define REG_PCIE2_MEM 0x10
-#define REG_PCIE2_MEM_MASK 0x14
-#define REG_PCIE_RESET_OPEN_DRAIN 0x018c
-#define REG_PCIE_RESET_OPEN_DRAIN_MASK GENMASK(2, 0)
#define REG_NP_SCU_PCIC 0x88
#define REG_NP_SCU_SSTR 0x9c
#define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
#define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11)
+#define REG_CRYPTO_CLKSRC2 0x20c
+
+#define REG_RST_CTRL2 0x830
+#define REG_RST_CTRL1 0x834
struct en_clk_desc {
int id;
@@ -57,6 +60,7 @@ struct en_clk_desc {
u8 div_shift;
u16 div_val0;
u8 div_step;
+ u8 div_offset;
};
struct en_clk_gate {
@@ -64,10 +68,18 @@ struct en_clk_gate {
struct clk_hw hw;
};
+struct en_rst_data {
+ const u16 *bank_ofs;
+ const u16 *idx_map;
+ void __iomem *base;
+ struct reset_controller_dev rcdev;
+};
+
struct en_clk_soc_data {
+ u32 num_clocks;
const struct clk_ops pcie_ops;
- int (*hw_init)(struct platform_device *pdev, void __iomem *base,
- void __iomem *np_base);
+ int (*hw_init)(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data);
};
static const u32 gsw_base[] = { 400000000, 500000000 };
@@ -75,6 +87,12 @@ static const u32 emi_base[] = { 333000000, 400000000 };
static const u32 bus_base[] = { 500000000, 540000000 };
static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
+/* EN7581 */
+static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
+static const u32 bus7581_base[] = { 600000000, 540000000 };
+static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
+static const u32 crypto_base[] = { 540000000, 480000000 };
+static const u32 emmc7581_base[] = { 200000000, 150000000 };
static const struct en_clk_desc en7523_base_clks[] = {
{
@@ -90,6 +108,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
+ .div_offset = 1,
}, {
.id = EN7523_CLK_EMI,
.name = "emi",
@@ -103,6 +122,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
+ .div_offset = 1,
}, {
.id = EN7523_CLK_BUS,
.name = "bus",
@@ -116,6 +136,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
+ .div_offset = 1,
}, {
.id = EN7523_CLK_SLIC,
.name = "slic",
@@ -156,27 +177,242 @@ static const struct en_clk_desc en7523_base_clks[] = {
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
+ .div_offset = 1,
}, {
.id = EN7523_CLK_CRYPTO,
.name = "crypto",
.base_reg = REG_CRYPTO_CLKSRC,
.base_bits = 1,
- .base_shift = 8,
+ .base_shift = 0,
.base_values = emi_base,
.n_base_values = ARRAY_SIZE(emi_base),
}
};
-static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
-{
- const struct en_clk_desc *desc = &en7523_base_clks[i];
- u32 val;
+static const struct en_clk_desc en7581_base_clks[] = {
+ {
+ .id = EN7523_CLK_GSW,
+ .name = "gsw",
+
+ .base_reg = REG_GSW_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = gsw_base,
+ .n_base_values = ARRAY_SIZE(gsw_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_EMI,
+ .name = "emi",
+
+ .base_reg = REG_EMI_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = emi7581_base,
+ .n_base_values = ARRAY_SIZE(emi7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_BUS,
+ .name = "bus",
+
+ .base_reg = REG_BUS_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = bus7581_base,
+ .n_base_values = ARRAY_SIZE(bus7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_SLIC,
+ .name = "slic",
+
+ .base_reg = REG_SPI_CLK_FREQ_SEL,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = slic_base,
+ .n_base_values = ARRAY_SIZE(slic_base),
+
+ .div_reg = REG_SPI_CLK_DIV_SEL,
+ .div_bits = 5,
+ .div_shift = 24,
+ .div_val0 = 20,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_SPI,
+ .name = "spi",
+
+ .base_reg = REG_SPI_CLK_DIV_SEL,
+
+ .base_value = 400000000,
+
+ .div_bits = 5,
+ .div_shift = 8,
+ .div_val0 = 40,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_NPU,
+ .name = "npu",
+
+ .base_reg = REG_NPU_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = npu7581_base,
+ .n_base_values = ARRAY_SIZE(npu7581_base),
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_CRYPTO,
+ .name = "crypto",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = crypto_base,
+ .n_base_values = ARRAY_SIZE(crypto_base),
+ }, {
+ .id = EN7581_CLK_EMMC,
+ .name = "emmc",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 12,
+ .base_values = emmc7581_base,
+ .n_base_values = ARRAY_SIZE(emmc7581_base),
+ }
+};
+
+static const u16 en7581_rst_ofs[] = {
+ REG_RST_CTRL2,
+ REG_RST_CTRL1,
+};
+
+static const u16 en7523_rst_map[] = {
+ /* RST_CTRL2 */
+ [EN7523_XPON_PHY_RST] = 0,
+ [EN7523_XSI_MAC_RST] = 7,
+ [EN7523_XSI_PHY_RST] = 8,
+ [EN7523_NPU_RST] = 9,
+ [EN7523_I2S_RST] = 10,
+ [EN7523_TRNG_RST] = 11,
+ [EN7523_TRNG_MSTART_RST] = 12,
+ [EN7523_DUAL_HSI0_RST] = 13,
+ [EN7523_DUAL_HSI1_RST] = 14,
+ [EN7523_HSI_RST] = 15,
+ [EN7523_DUAL_HSI0_MAC_RST] = 16,
+ [EN7523_DUAL_HSI1_MAC_RST] = 17,
+ [EN7523_HSI_MAC_RST] = 18,
+ [EN7523_WDMA_RST] = 19,
+ [EN7523_WOE0_RST] = 20,
+ [EN7523_WOE1_RST] = 21,
+ [EN7523_HSDMA_RST] = 22,
+ [EN7523_I2C2RBUS_RST] = 23,
+ [EN7523_TDMA_RST] = 24,
+ /* RST_CTRL1 */
+ [EN7523_PCM1_ZSI_ISI_RST] = RST_NR_PER_BANK + 0,
+ [EN7523_FE_PDMA_RST] = RST_NR_PER_BANK + 1,
+ [EN7523_FE_QDMA_RST] = RST_NR_PER_BANK + 2,
+ [EN7523_PCM_SPIWP_RST] = RST_NR_PER_BANK + 4,
+ [EN7523_CRYPTO_RST] = RST_NR_PER_BANK + 6,
+ [EN7523_TIMER_RST] = RST_NR_PER_BANK + 8,
+ [EN7523_PCM1_RST] = RST_NR_PER_BANK + 11,
+ [EN7523_UART_RST] = RST_NR_PER_BANK + 12,
+ [EN7523_GPIO_RST] = RST_NR_PER_BANK + 13,
+ [EN7523_GDMA_RST] = RST_NR_PER_BANK + 14,
+ [EN7523_I2C_MASTER_RST] = RST_NR_PER_BANK + 16,
+ [EN7523_PCM2_ZSI_ISI_RST] = RST_NR_PER_BANK + 17,
+ [EN7523_SFC_RST] = RST_NR_PER_BANK + 18,
+ [EN7523_UART2_RST] = RST_NR_PER_BANK + 19,
+ [EN7523_GDMP_RST] = RST_NR_PER_BANK + 20,
+ [EN7523_FE_RST] = RST_NR_PER_BANK + 21,
+ [EN7523_USB_HOST_P0_RST] = RST_NR_PER_BANK + 22,
+ [EN7523_GSW_RST] = RST_NR_PER_BANK + 23,
+ [EN7523_SFC2_PCM_RST] = RST_NR_PER_BANK + 25,
+ [EN7523_PCIE0_RST] = RST_NR_PER_BANK + 26,
+ [EN7523_PCIE1_RST] = RST_NR_PER_BANK + 27,
+ [EN7523_PCIE_HB_RST] = RST_NR_PER_BANK + 29,
+ [EN7523_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
+};
+
+static const u16 en7581_rst_map[] = {
+ /* RST_CTRL2 */
+ [EN7581_XPON_PHY_RST] = 0,
+ [EN7581_CPU_TIMER2_RST] = 2,
+ [EN7581_HSUART_RST] = 3,
+ [EN7581_UART4_RST] = 4,
+ [EN7581_UART5_RST] = 5,
+ [EN7581_I2C2_RST] = 6,
+ [EN7581_XSI_MAC_RST] = 7,
+ [EN7581_XSI_PHY_RST] = 8,
+ [EN7581_NPU_RST] = 9,
+ [EN7581_I2S_RST] = 10,
+ [EN7581_TRNG_RST] = 11,
+ [EN7581_TRNG_MSTART_RST] = 12,
+ [EN7581_DUAL_HSI0_RST] = 13,
+ [EN7581_DUAL_HSI1_RST] = 14,
+ [EN7581_HSI_RST] = 15,
+ [EN7581_DUAL_HSI0_MAC_RST] = 16,
+ [EN7581_DUAL_HSI1_MAC_RST] = 17,
+ [EN7581_HSI_MAC_RST] = 18,
+ [EN7581_WDMA_RST] = 19,
+ [EN7581_WOE0_RST] = 20,
+ [EN7581_WOE1_RST] = 21,
+ [EN7581_HSDMA_RST] = 22,
+ [EN7581_TDMA_RST] = 24,
+ [EN7581_EMMC_RST] = 25,
+ [EN7581_SOE_RST] = 26,
+ [EN7581_PCIE2_RST] = 27,
+ [EN7581_XFP_MAC_RST] = 28,
+ [EN7581_USB_HOST_P1_RST] = 29,
+ [EN7581_USB_HOST_P1_U3_PHY_RST] = 30,
+ /* RST_CTRL1 */
+ [EN7581_PCM1_ZSI_ISI_RST] = RST_NR_PER_BANK + 0,
+ [EN7581_FE_PDMA_RST] = RST_NR_PER_BANK + 1,
+ [EN7581_FE_QDMA_RST] = RST_NR_PER_BANK + 2,
+ [EN7581_PCM_SPIWP_RST] = RST_NR_PER_BANK + 4,
+ [EN7581_CRYPTO_RST] = RST_NR_PER_BANK + 6,
+ [EN7581_TIMER_RST] = RST_NR_PER_BANK + 8,
+ [EN7581_PCM1_RST] = RST_NR_PER_BANK + 11,
+ [EN7581_UART_RST] = RST_NR_PER_BANK + 12,
+ [EN7581_GPIO_RST] = RST_NR_PER_BANK + 13,
+ [EN7581_GDMA_RST] = RST_NR_PER_BANK + 14,
+ [EN7581_I2C_MASTER_RST] = RST_NR_PER_BANK + 16,
+ [EN7581_PCM2_ZSI_ISI_RST] = RST_NR_PER_BANK + 17,
+ [EN7581_SFC_RST] = RST_NR_PER_BANK + 18,
+ [EN7581_UART2_RST] = RST_NR_PER_BANK + 19,
+ [EN7581_GDMP_RST] = RST_NR_PER_BANK + 20,
+ [EN7581_FE_RST] = RST_NR_PER_BANK + 21,
+ [EN7581_USB_HOST_P0_RST] = RST_NR_PER_BANK + 22,
+ [EN7581_GSW_RST] = RST_NR_PER_BANK + 23,
+ [EN7581_SFC2_PCM_RST] = RST_NR_PER_BANK + 25,
+ [EN7581_PCIE0_RST] = RST_NR_PER_BANK + 26,
+ [EN7581_PCIE1_RST] = RST_NR_PER_BANK + 27,
+ [EN7581_CPU_TIMER_RST] = RST_NR_PER_BANK + 28,
+ [EN7581_PCIE_HB_RST] = RST_NR_PER_BANK + 29,
+ [EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
+};
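+
+/*
+ * Map sketch (assuming RST_NR_PER_BANK is 32, matching the two 32-bit
+ * RST_CTRL banks): a DT reset cell N is translated through rst_map[N]
+ * to a flat bit index, which the reset ops then split into
+ * bank = idx / RST_NR_PER_BANK and bit = idx % RST_NR_PER_BANK.
+ */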
+
+static int en7581_reset_register(struct device *dev, void __iomem *base,
+ const u16 *rst_map, int nr_resets);
+
+static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
+{
if (!desc->base_bits)
return desc->base_value;
- val = readl(base + desc->base_reg);
val >>= desc->base_shift;
val &= (1 << desc->base_bits) - 1;
@@ -186,23 +422,18 @@ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
return desc->base_values[val];
}
-static u32 en7523_get_div(void __iomem *base, int i)
+static u32 en7523_get_div(const struct en_clk_desc *desc, u32 val)
{
- const struct en_clk_desc *desc = &en7523_base_clks[i];
- u32 reg, val;
-
if (!desc->div_bits)
return 1;
- reg = desc->div_reg ? desc->div_reg : desc->base_reg;
- val = readl(base + reg);
val >>= desc->div_shift;
val &= (1 << desc->div_bits) - 1;
if (!val && desc->div_val0)
return desc->div_val0;
- return (val + 1) * desc->div_step;
+ return (val + desc->div_offset) * desc->div_step;
}
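+
+/*
+ * Example with hypothetical descriptor values: div_bits = 3,
+ * div_shift = 4, div_step = 2, div_offset = 1. A register value of
+ * 0x30 gives val = (0x30 >> 4) & 0x7 = 3, hence a divisor of
+ * (3 + 1) * 2 = 8.
+ */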
static int en7523_pci_is_enabled(struct clk_hw *hw)
@@ -279,9 +510,8 @@ static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
cg->base = np_base;
cg->hw.init = &init;
- if (init.ops->disable)
- init.ops->disable(&cg->hw);
- init.ops->unprepare(&cg->hw);
+ if (init.ops->unprepare)
+ init.ops->unprepare(&cg->hw);
if (clk_hw_register(dev, &cg->hw))
return NULL;
@@ -299,23 +529,6 @@ static int en7581_pci_is_enabled(struct clk_hw *hw)
return (val & mask) == mask;
}
-static int en7581_pci_prepare(struct clk_hw *hw)
-{
- struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
- void __iomem *np_base = cg->base;
- u32 val, mask;
-
- mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
- REG_RESET_CONTROL_PCIEHB;
- val = readl(np_base + REG_RESET_CONTROL1);
- writel(val & ~mask, np_base + REG_RESET_CONTROL1);
- val = readl(np_base + REG_RESET_CONTROL2);
- writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
- usleep_range(5000, 10000);
-
- return 0;
-}
-
static int en7581_pci_enable(struct clk_hw *hw)
{
struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
@@ -327,28 +540,10 @@ static int en7581_pci_enable(struct clk_hw *hw)
REG_PCI_CONTROL_PERSTOUT;
val = readl(np_base + REG_PCI_CONTROL);
writel(val | mask, np_base + REG_PCI_CONTROL);
- msleep(250);
return 0;
}
-static void en7581_pci_unprepare(struct clk_hw *hw)
-{
- struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
- void __iomem *np_base = cg->base;
- u32 val, mask;
-
- mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
- REG_RESET_CONTROL_PCIEHB;
- val = readl(np_base + REG_RESET_CONTROL1);
- writel(val | mask, np_base + REG_RESET_CONTROL1);
- mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
- writel(val | mask, np_base + REG_RESET_CONTROL1);
- val = readl(np_base + REG_RESET_CONTROL2);
- writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
- msleep(100);
-}
-
static void en7581_pci_disable(struct clk_hw *hw)
{
struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
@@ -363,37 +558,6 @@ static void en7581_pci_disable(struct clk_hw *hw)
usleep_range(1000, 2000);
}
-static int en7581_clk_hw_init(struct platform_device *pdev,
- void __iomem *base,
- void __iomem *np_base)
-{
- void __iomem *pb_base;
- u32 val;
-
- pb_base = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(pb_base))
- return PTR_ERR(pb_base);
-
- val = readl(np_base + REG_NP_SCU_SSTR);
- val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
- writel(val, np_base + REG_NP_SCU_SSTR);
- val = readl(np_base + REG_NP_SCU_PCIC);
- writel(val | 3, np_base + REG_NP_SCU_PCIC);
-
- writel(0x20000000, pb_base + REG_PCIE0_MEM);
- writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
- writel(0x24000000, pb_base + REG_PCIE1_MEM);
- writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
- writel(0x28000000, pb_base + REG_PCIE2_MEM);
- writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
-
- val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
- writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
- base + REG_PCIE_RESET_OPEN_DRAIN);
-
- return 0;
-}
-
static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
void __iomem *base, void __iomem *np_base)
{
@@ -403,9 +567,12 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
const struct en_clk_desc *desc = &en7523_base_clks[i];
+ u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ u32 val = readl(base + desc->base_reg);
- rate = en7523_get_base_rate(base, i);
- rate /= en7523_get_div(base, i);
+ rate = en7523_get_base_rate(desc, val);
+ val = readl(base + reg);
+ rate /= en7523_get_div(desc, val);
hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
if (IS_ERR(hw)) {
@@ -419,17 +586,12 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
hw = en7523_register_pcie_clk(dev, np_base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
-
- clk_data->num = EN7523_NUM_CLOCKS;
}
-static int en7523_clk_probe(struct platform_device *pdev)
+static int en7523_clk_hw_init(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data)
{
- struct device_node *node = pdev->dev.of_node;
- const struct en_clk_soc_data *soc_data;
- struct clk_hw_onecell_data *clk_data;
void __iomem *base, *np_base;
- int r;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -439,44 +601,199 @@ static int en7523_clk_probe(struct platform_device *pdev)
if (IS_ERR(np_base))
return PTR_ERR(np_base);
- soc_data = device_get_match_data(&pdev->dev);
- if (soc_data->hw_init) {
- r = soc_data->hw_init(pdev, base, np_base);
- if (r)
- return r;
+ en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
+
+ return en7581_reset_register(&pdev->dev, np_base, en7523_rst_map,
+ ARRAY_SIZE(en7523_rst_map));
+}
+
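+/*
+ * Unlike the en7523 path above, the en7581 base clocks are read through
+ * the "airoha,en7581-chip-scu" syscon regmap rather than a directly
+ * mapped register window, so each register access can fail and is
+ * checked below.
+ */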
+static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ struct regmap *map, void __iomem *base)
+{
+ struct clk_hw *hw;
+ u32 rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(en7581_base_clks); i++) {
+ const struct en_clk_desc *desc = &en7581_base_clks[i];
+ u32 val, reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ int err;
+
+ err = regmap_read(map, desc->base_reg, &val);
+ if (err) {
+ pr_err("Failed reading fixed clk rate %s: %d\n",
+ desc->name, err);
+ continue;
+ }
+ rate = en7523_get_base_rate(desc, val);
+
+ err = regmap_read(map, reg, &val);
+ if (err) {
+ pr_err("Failed reading fixed clk div %s: %d\n",
+ desc->name, err);
+ continue;
+ }
+ rate /= en7523_get_div(desc, val);
+
+ hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ desc->name, PTR_ERR(hw));
+ continue;
+ }
+
+ clk_data->hws[desc->id] = hw;
}
+ hw = en7523_register_pcie_clk(dev, base);
+ clk_data->hws[EN7523_CLK_PCIE] = hw;
+}
+
+static int en7523_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
+ void __iomem *addr = rst_data->base + rst_data->bank_ofs[id / RST_NR_PER_BANK];
+ u32 val;
+
+ val = readl(addr);
+ if (assert)
+ val |= BIT(id % RST_NR_PER_BANK);
+ else
+ val &= ~BIT(id % RST_NR_PER_BANK);
+ writel(val, addr);
+
+ return 0;
+}
+
+static int en7523_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return en7523_reset_update(rcdev, id, true);
+}
+
+static int en7523_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return en7523_reset_update(rcdev, id, false);
+}
+
+static int en7523_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
+ void __iomem *addr = rst_data->base + rst_data->bank_ofs[id / RST_NR_PER_BANK];
+
+ return !!(readl(addr) & BIT(id % RST_NR_PER_BANK));
+}
+
+static int en7523_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
+
+ if (reset_spec->args[0] >= rcdev->nr_resets)
+ return -EINVAL;
+
+ return rst_data->idx_map[reset_spec->args[0]];
+}
+
+static const struct reset_control_ops en7581_reset_ops = {
+ .assert = en7523_reset_assert,
+ .deassert = en7523_reset_deassert,
+ .status = en7523_reset_status,
+};
+
+static int en7581_reset_register(struct device *dev, void __iomem *base,
+ const u16 *rst_map, int nr_resets)
+{
+ struct en_rst_data *rst_data;
+
+ rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL);
+ if (!rst_data)
+ return -ENOMEM;
+
+ rst_data->bank_ofs = en7581_rst_ofs;
+ rst_data->idx_map = rst_map;
+ rst_data->base = base;
+
+ rst_data->rcdev.nr_resets = nr_resets;
+ rst_data->rcdev.of_xlate = en7523_reset_xlate;
+ rst_data->rcdev.ops = &en7581_reset_ops;
+ rst_data->rcdev.of_node = dev->of_node;
+ rst_data->rcdev.of_reset_n_cells = 1;
+ rst_data->rcdev.owner = THIS_MODULE;
+ rst_data->rcdev.dev = dev;
+
+ return devm_reset_controller_register(dev, &rst_data->rcdev);
+}
+
+static int en7581_clk_hw_init(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data)
+{
+ struct regmap *map;
+ void __iomem *base;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ en7581_register_clocks(&pdev->dev, clk_data, map, base);
+
+ val = readl(base + REG_NP_SCU_SSTR);
+ val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
+ writel(val, base + REG_NP_SCU_SSTR);
+ val = readl(base + REG_NP_SCU_PCIC);
+ writel(val | 3, base + REG_NP_SCU_PCIC);
+
+ return en7581_reset_register(&pdev->dev, base, en7581_rst_map,
+ ARRAY_SIZE(en7581_rst_map));
+}
+
+static int en7523_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct en_clk_soc_data *soc_data;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+
+ soc_data = device_get_match_data(&pdev->dev);
+
clk_data = devm_kzalloc(&pdev->dev,
- struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ struct_size(clk_data, hws, soc_data->num_clocks),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ clk_data->num = soc_data->num_clocks;
+ r = soc_data->hw_init(pdev, clk_data);
if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
+ return r;
- return r;
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct en_clk_soc_data en7523_data = {
+ .num_clocks = ARRAY_SIZE(en7523_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7523_pci_is_enabled,
.prepare = en7523_pci_prepare,
.unprepare = en7523_pci_unprepare,
},
+ .hw_init = en7523_clk_hw_init,
};
static const struct en_clk_soc_data en7581_data = {
+	/* We increment num_clocks by 1 to account for the additional PCIe clock. */
+ .num_clocks = ARRAY_SIZE(en7581_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7581_pci_is_enabled,
- .prepare = en7581_pci_prepare,
.enable = en7581_pci_enable,
- .unprepare = en7581_pci_unprepare,
.disable = en7581_pci_disable,
},
.hw_init = en7581_clk_hw_init,
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
new file mode 100644
index 000000000000..972aadd11493
--- /dev/null
+++ b/drivers/clk/clk-ep93xx.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Clock control for Cirrus EP93xx chips.
+ * Copyright (C) 2021 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Based on a rewrite of arch/arm/mach-ep93xx/clock.c:
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ */
+#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+#include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+
+#include <asm/div64.h>
+
+#define EP93XX_EXT_CLK_RATE 14745600
+#define EP93XX_EXT_RTC_RATE 32768
+
+#define EP93XX_SYSCON_POWER_STATE 0x00
+#define EP93XX_SYSCON_PWRCNT 0x04
+#define EP93XX_SYSCON_PWRCNT_UARTBAUD BIT(29)
+#define EP93XX_SYSCON_PWRCNT_USH_EN 28
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16
+#define EP93XX_SYSCON_CLKSET1 0x20
+#define EP93XX_SYSCON_CLKSET1_NBYP1 BIT(23)
+#define EP93XX_SYSCON_CLKSET2 0x24
+#define EP93XX_SYSCON_CLKSET2_NBYP2 BIT(19)
+#define EP93XX_SYSCON_CLKSET2_PLL2_EN BIT(18)
+#define EP93XX_SYSCON_DEVCFG 0x80
+#define EP93XX_SYSCON_DEVCFG_U3EN 24
+#define EP93XX_SYSCON_DEVCFG_U2EN 20
+#define EP93XX_SYSCON_DEVCFG_U1EN 18
+#define EP93XX_SYSCON_VIDCLKDIV 0x84
+#define EP93XX_SYSCON_CLKDIV_ENABLE 15
+#define EP93XX_SYSCON_CLKDIV_ESEL BIT(14)
+#define EP93XX_SYSCON_CLKDIV_PSEL BIT(13)
+#define EP93XX_SYSCON_CLKDIV_MASK GENMASK(14, 13)
+#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
+#define EP93XX_SYSCON_I2SCLKDIV 0x8c
+#define EP93XX_SYSCON_I2SCLKDIV_SENA 31
+#define EP93XX_SYSCON_I2SCLKDIV_ORIDE BIT(29)
+#define EP93XX_SYSCON_I2SCLKDIV_SPOL BIT(19)
+#define EP93XX_SYSCON_KEYTCHCLKDIV 0x90
+#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31
+#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV 0
+#define EP93XX_SYSCON_CHIPID 0x94
+#define EP93XX_SYSCON_CHIPID_ID 0x9213
+
+#define EP93XX_FIXED_CLK_COUNT 21
+
+static const char ep93xx_adc_divisors[] = { 16, 4 };
+static const char ep93xx_sclk_divisors[] = { 2, 4 };
+static const char ep93xx_lrclk_divisors[] = { 32, 64, 128 };
+
+struct ep93xx_clk {
+ struct clk_hw hw;
+ u16 idx;
+ u16 reg;
+ u32 mask;
+ u8 bit_idx;
+ u8 shift;
+ u8 width;
+ u8 num_div;
+ const char *div;
+};
+
+struct ep93xx_clk_priv {
+ spinlock_t lock;
+ struct ep93xx_regmap_adev *aux_dev;
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *map;
+ struct clk_hw *fixed[EP93XX_FIXED_CLK_COUNT];
+ struct ep93xx_clk reg[];
+};
+
+static struct ep93xx_clk *ep93xx_clk_from(struct clk_hw *hw)
+{
+ return container_of(hw, struct ep93xx_clk, hw);
+}
+
+static struct ep93xx_clk_priv *ep93xx_priv_from(struct ep93xx_clk *clk)
+{
+ return container_of(clk, struct ep93xx_clk_priv, reg[clk->idx]);
+}
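+
+/*
+ * Note: reg[] is a flexible array at the tail of ep93xx_clk_priv, so a
+ * clock that knows its own index can recover the private structure via
+ * container_of() on &priv->reg[idx], without storing a back-pointer.
+ */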
+
+static void ep93xx_clk_write(struct ep93xx_clk_priv *priv, unsigned int reg, unsigned int val)
+{
+ struct ep93xx_regmap_adev *aux = priv->aux_dev;
+
+ aux->write(aux->map, aux->lock, reg, val);
+}
+
+static int ep93xx_clk_is_enabled(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+
+ return !!(val & BIT(clk->bit_idx));
+}
+
+static int ep93xx_clk_enable(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val |= BIT(clk->bit_idx);
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static void ep93xx_clk_disable(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~BIT(clk->bit_idx);
+
+ ep93xx_clk_write(priv, clk->reg, val);
+}
+
+static const struct clk_ops clk_ep93xx_gate_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+};
+
+static int ep93xx_clk_register_gate(struct ep93xx_clk *clk,
+ const char *name,
+ struct clk_parent_data *parent_data,
+ unsigned long flags,
+ unsigned int reg,
+ u8 bit_idx)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_ep93xx_gate_ops;
+ init.flags = flags;
+ init.parent_data = parent_data;
+ init.num_parents = 1;
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+static u8 ep93xx_mux_get_parent(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+
+ val &= EP93XX_SYSCON_CLKDIV_MASK;
+
+ switch (val) {
+ case EP93XX_SYSCON_CLKDIV_ESEL:
+ return 1; /* PLL1 */
+ case EP93XX_SYSCON_CLKDIV_MASK:
+ return 2; /* PLL2 */
+ default:
+ return 0; /* XTALI */
+	}
+}
+
+static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ if (index >= 3)
+ return -EINVAL;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~(EP93XX_SYSCON_CLKDIV_MASK);
+ val |= index > 0 ? EP93XX_SYSCON_CLKDIV_ESEL : 0;
+ val |= index > 1 ? EP93XX_SYSCON_CLKDIV_PSEL : 0;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static bool is_best(unsigned long rate, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(rate, now) < abs_diff(rate, best);
+}
+
+static int ep93xx_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long best_rate = 0, actual_rate, mclk_rate;
+ unsigned long rate = req->rate;
+ struct clk_hw *parent_best = NULL;
+ unsigned long parent_rate_best;
+ unsigned long parent_rate;
+ int div, pdiv;
+ unsigned int i;
+
+	/*
+	 * Try the two PLLs and the external clock. Because the valid
+	 * predividers are 2, 2.5 and 3, we multiply all clock rates by 2
+	 * to avoid floating point math.
+	 *
+	 * This is based on the algorithm in the ep93xx raster guide:
+	 * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
+	 */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+
+ parent_rate = clk_hw_get_rate(parent);
+ mclk_rate = parent_rate * 2;
+
+ /* Try each predivider value */
+ for (pdiv = 4; pdiv <= 6; pdiv++) {
+ div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv);
+ if (!in_range(div, 1, 127))
+ continue;
+
+ actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div);
+ if (is_best(rate, actual_rate, best_rate)) {
+ best_rate = actual_rate;
+ parent_rate_best = parent_rate;
+ parent_best = parent;
+ }
+ }
+ }
+
+ if (!parent_best)
+ return -EINVAL;
+
+ req->best_parent_rate = parent_rate_best;
+ req->best_parent_hw = parent_best;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ unsigned int pdiv, div;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ pdiv = (val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & GENMASK(1, 0);
+ div = val & GENMASK(6, 0);
+ if (!div)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(parent_rate * 2, (pdiv + 3) * div);
+}
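+
+/*
+ * Example with hypothetical values: the PDIV field stores pdiv - 3, so
+ * a field value of 1 selects pdiv = 4 (an effective predivider of 2,
+ * since mclk runs at twice the parent rate). With a 100 MHz parent and
+ * div = 5 this yields 100 MHz * 2 / (4 * 5) = 10 MHz.
+ */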
+
+static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ int pdiv, div, npdiv, ndiv;
+ unsigned long actual_rate, mclk_rate, rate_err = ULONG_MAX;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ mclk_rate = parent_rate * 2;
+
+ for (pdiv = 4; pdiv <= 6; pdiv++) {
+ div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv);
+ if (!in_range(div, 1, 127))
+ continue;
+
+ actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div);
+ if (abs(actual_rate - rate) < rate_err) {
+ npdiv = pdiv - 3;
+ ndiv = div;
+ rate_err = abs(actual_rate - rate);
+ }
+ }
+
+ if (rate_err == ULONG_MAX)
+ return -EINVAL;
+
+ /*
+ * Clear old dividers.
+	 * Bit 7 is a reserved bit in all ClkDiv registers.
+ */
+ val &= ~(GENMASK(9, 0) & ~BIT(7));
+
+ /* Set the new pdiv and div bits for the new clock rate */
+ val |= (npdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | ndiv;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ddiv_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .get_parent = ep93xx_mux_get_parent,
+ .set_parent = ep93xx_mux_set_parent_lock,
+ .determine_rate = ep93xx_mux_determine_rate,
+ .recalc_rate = ep93xx_ddiv_recalc_rate,
+ .set_rate = ep93xx_ddiv_set_rate,
+};
+
+static int ep93xx_clk_register_ddiv(struct ep93xx_clk *clk,
+ const char *name,
+ struct clk_parent_data *parent_data,
+ u8 num_parents,
+ unsigned int reg,
+ u8 bit_idx)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_ddiv_ops;
+ init.flags = 0;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+ u8 index;
+
+ regmap_read(priv->map, clk->reg, &val);
+ index = (val & clk->mask) >> clk->shift;
+ if (index >= clk->num_div)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
+}
+
+static int ep93xx_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ unsigned long best = 0, now;
+ unsigned int i;
+
+ for (i = 0; i < clk->num_div; i++) {
+ if (req->rate * clk->div[i] == req->best_parent_rate)
+ return 0;
+
+ now = DIV_ROUND_CLOSEST(req->best_parent_rate, clk->div[i]);
+ if (!best || is_best(req->rate, now, best))
+ best = now;
+ }
+
+ req->rate = best;
+
+ return 0;
+}
+
+static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ unsigned int i;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~clk->mask;
+ for (i = 0; i < clk->num_div; i++)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate, clk->div[i]))
+ break;
+
+ if (i == clk->num_div)
+ return -EINVAL;
+
+ val |= i << clk->shift;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static const struct clk_ops ep93xx_div_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .recalc_rate = ep93xx_div_recalc_rate,
+ .determine_rate = ep93xx_div_determine_rate,
+ .set_rate = ep93xx_div_set_rate,
+};
+
+static int ep93xx_register_div(struct ep93xx_clk *clk,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ unsigned int reg,
+ u8 enable_bit,
+ u8 shift,
+ u8 width,
+ const char *clk_divisors,
+ u8 num_div)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &ep93xx_div_ops;
+ init.flags = 0;
+ init.parent_data = parent_data;
+ init.num_parents = 1;
+
+ clk->reg = reg;
+ clk->bit_idx = enable_bit;
+ clk->mask = GENMASK(shift + width - 1, shift);
+ clk->shift = shift;
+ clk->div = clk_divisors;
+ clk->num_div = num_div;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
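+
+/*
+ * Example: shift = 16 and width = 1 (as used for the i2s sclk below)
+ * give mask = GENMASK(16, 16), i.e. a single divider-select bit.
+ */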
+
+struct ep93xx_gate {
+ unsigned int idx;
+ unsigned int bit;
+ const char *name;
+};
+
+static const struct ep93xx_gate ep93xx_uarts[] = {
+ { EP93XX_CLK_UART1, EP93XX_SYSCON_DEVCFG_U1EN, "uart1" },
+ { EP93XX_CLK_UART2, EP93XX_SYSCON_DEVCFG_U2EN, "uart2" },
+ { EP93XX_CLK_UART3, EP93XX_SYSCON_DEVCFG_U3EN, "uart3" },
+};
+
+static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
+{
+ struct clk_parent_data parent_data = { };
+ unsigned int i, idx, clk_uart_div;
+ struct ep93xx_clk *clk;
+ u32 val;
+ int ret;
+
+ regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
+ if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
+ clk_uart_div = 1;
+ else
+ clk_uart_div = 2;
+
+ priv->fixed[EP93XX_CLK_UART] =
+ devm_clk_hw_register_fixed_factor_index(priv->dev, "uart",
+ 0, /* XTALI external clock */
+ 0, 1, clk_uart_div);
+ parent_data.hw = priv->fixed[EP93XX_CLK_UART];
+
+	/* Parent the uart gate clocks to the uart clock. */
+ for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) {
+ idx = ep93xx_uarts[i].idx - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_gate(clk,
+ ep93xx_uarts[i].name,
+ &parent_data, CLK_SET_RATE_PARENT,
+ EP93XX_SYSCON_DEVCFG,
+ ep93xx_uarts[i].bit);
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "failed to register uart[%d] clock\n", i);
+ }
+
+ return 0;
+}
+
+static const struct ep93xx_gate ep93xx_dmas[] = {
+ { EP93XX_CLK_M2M0, EP93XX_SYSCON_PWRCNT_DMA_M2M0, "m2m0" },
+ { EP93XX_CLK_M2M1, EP93XX_SYSCON_PWRCNT_DMA_M2M1, "m2m1" },
+ { EP93XX_CLK_M2P0, EP93XX_SYSCON_PWRCNT_DMA_M2P0, "m2p0" },
+ { EP93XX_CLK_M2P1, EP93XX_SYSCON_PWRCNT_DMA_M2P1, "m2p1" },
+ { EP93XX_CLK_M2P2, EP93XX_SYSCON_PWRCNT_DMA_M2P2, "m2p2" },
+ { EP93XX_CLK_M2P3, EP93XX_SYSCON_PWRCNT_DMA_M2P3, "m2p3" },
+ { EP93XX_CLK_M2P4, EP93XX_SYSCON_PWRCNT_DMA_M2P4, "m2p4" },
+ { EP93XX_CLK_M2P5, EP93XX_SYSCON_PWRCNT_DMA_M2P5, "m2p5" },
+ { EP93XX_CLK_M2P6, EP93XX_SYSCON_PWRCNT_DMA_M2P6, "m2p6" },
+ { EP93XX_CLK_M2P7, EP93XX_SYSCON_PWRCNT_DMA_M2P7, "m2p7" },
+ { EP93XX_CLK_M2P8, EP93XX_SYSCON_PWRCNT_DMA_M2P8, "m2p8" },
+ { EP93XX_CLK_M2P9, EP93XX_SYSCON_PWRCNT_DMA_M2P9, "m2p9" },
+};
+
+static int ep93xx_dma_clock_init(struct ep93xx_clk_priv *priv)
+{
+ struct clk_parent_data parent_data = { };
+ unsigned int i, idx;
+
+ parent_data.hw = priv->fixed[EP93XX_CLK_HCLK];
+ for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) {
+ idx = ep93xx_dmas[i].idx;
+ priv->fixed[idx] = devm_clk_hw_register_gate_parent_data(priv->dev,
+ ep93xx_dmas[i].name,
+ &parent_data, 0,
+ priv->base + EP93XX_SYSCON_PWRCNT,
+ ep93xx_dmas[i].bit,
+ 0,
+ &priv->lock);
+ if (IS_ERR(priv->fixed[idx]))
+ return PTR_ERR(priv->fixed[idx]);
+ }
+
+ return 0;
+}
+
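+/*
+ * Clock cells below EP93XX_CLK_UART1 resolve to the fixed[] table;
+ * cells up to EP93XX_CLK_I2S_LRCLK resolve to the register-backed
+ * clocks in reg[], offset by EP93XX_CLK_UART1.
+ */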
+static struct clk_hw *of_clk_ep93xx_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct ep93xx_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < EP93XX_CLK_UART1)
+ return priv->fixed[idx];
+
+ if (idx <= EP93XX_CLK_I2S_LRCLK)
+ return &priv->reg[idx - EP93XX_CLK_UART1].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
+ */
+static unsigned long calc_pll_rate(u64 rate, u32 config_word)
+{
+ rate *= ((config_word >> 11) & GENMASK(4, 0)) + 1; /* X1FBD */
+ rate *= ((config_word >> 5) & GENMASK(5, 0)) + 1; /* X2FBD */
+ do_div(rate, (config_word & GENMASK(4, 0)) + 1); /* X2IPD */
+ rate >>= (config_word >> 16) & GENMASK(1, 0); /* PS */
+
+ return rate;
+}
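+
+/*
+ * Worked example (hypothetical config word): X1FBD = 19, X2FBD = 1,
+ * X2IPD = 0, PS = 1 gives 14.7456 MHz * 20 * 2 / 1 / 2 = 294.912 MHz.
+ */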
+
+static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
+{
+ static const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ static const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ static const char pclk_divisors[] = { 1, 2, 4, 8 };
+ struct clk_parent_data xtali = { .index = 0 };
+ unsigned int clk_f_div, clk_h_div, clk_p_div;
+ unsigned long clk_pll1_rate, clk_pll2_rate;
+ struct device *dev = priv->dev;
+ struct clk_hw *hw, *pll1;
+ u32 value;
+
+ /* Determine the bootloader configured pll1 rate */
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET1, &value);
+
+ if (value & EP93XX_SYSCON_CLKSET1_NBYP1)
+ clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+ else
+ clk_pll1_rate = EP93XX_EXT_CLK_RATE;
+
+ pll1 = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll1", &xtali,
+ 0, clk_pll1_rate);
+ if (IS_ERR(pll1))
+ return PTR_ERR(pll1);
+
+ priv->fixed[EP93XX_CLK_PLL1] = pll1;
+
+ /* Initialize the pll1 derived clocks */
+ clk_f_div = fclk_divisors[(value >> 25) & GENMASK(2, 0)];
+ clk_h_div = hclk_divisors[(value >> 20) & GENMASK(2, 0)];
+ clk_p_div = pclk_divisors[(value >> 18) & GENMASK(1, 0)];
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "fclk", pll1, 0, 1, clk_f_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_FCLK] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "hclk", pll1, 0, 1, clk_h_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_HCLK] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "pclk", hw, 0, 1, clk_p_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PCLK] = hw;
+
+ /* Determine the bootloader configured pll2 rate */
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
+ if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
+ clk_pll2_rate = EP93XX_EXT_CLK_RATE;
+ else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
+ clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+ else
+ clk_pll2_rate = 0;
+
+ hw = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll2", &xtali,
+ 0, clk_pll2_rate);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PLL2] = hw;
+
+ return 0;
+}
+
+static int ep93xx_clk_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct clk_parent_data xtali = { .index = 0 };
+ struct clk_parent_data ddiv_pdata[3] = { };
+ unsigned int clk_spi_div, clk_usb_div;
+ struct clk_parent_data pdata = {};
+ struct device *dev = &adev->dev;
+ struct ep93xx_clk_priv *priv;
+ struct ep93xx_clk *clk;
+ struct clk_hw *hw;
+ unsigned int idx;
+ int ret;
+ u32 value;
+
+ priv = devm_kzalloc(dev, struct_size(priv, reg, 10), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ priv->dev = dev;
+ priv->aux_dev = rdev;
+ priv->map = rdev->map;
+ priv->base = rdev->base;
+
+ ret = ep93xx_plls_init(priv);
+ if (ret)
+ return ret;
+
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
+ clk_usb_div = (value >> 28 & GENMASK(3, 0)) + 1;
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "usb_clk",
+ priv->fixed[EP93XX_CLK_PLL2], 0, 1,
+ clk_usb_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_USB] = hw;
+
+ ret = ep93xx_uart_clock_init(priv);
+ if (ret)
+ return ret;
+
+ ret = ep93xx_dma_clock_init(priv);
+ if (ret)
+ return ret;
+
+ clk_spi_div = id->driver_data;
+ hw = devm_clk_hw_register_fixed_factor_index(dev, "ep93xx-spi.0",
+ 0, /* XTALI external clock */
+ 0, 1, clk_spi_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_SPI] = hw;
+
+ /* PWM clock */
+ hw = devm_clk_hw_register_fixed_factor_index(dev, "pwm_clk", 0, /* XTALI external clock */
+ 0, 1, 1);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PWM] = hw;
+
+ /* USB clock */
+ pdata.hw = priv->fixed[EP93XX_CLK_USB];
+ hw = devm_clk_hw_register_gate_parent_data(priv->dev, "ohci-platform", &pdata,
+ 0, priv->base + EP93XX_SYSCON_PWRCNT,
+ EP93XX_SYSCON_PWRCNT_USH_EN, 0,
+ &priv->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_USB] = hw;
+
+ ddiv_pdata[0].index = 0; /* XTALI external clock */
+ ddiv_pdata[1].hw = priv->fixed[EP93XX_CLK_PLL1];
+ ddiv_pdata[2].hw = priv->fixed[EP93XX_CLK_PLL2];
+
+ /* touchscreen/ADC clock */
+ idx = EP93XX_CLK_ADC - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_register_div(clk, "ep93xx-adc", &xtali,
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_ADIV,
+ 1,
+ ep93xx_adc_divisors,
+			  ARRAY_SIZE(ep93xx_adc_divisors));
+	if (ret)
+		return ret;
+
+ /* keypad clock */
+ idx = EP93XX_CLK_KEYPAD - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_register_div(clk, "ep93xx-keypad", &xtali,
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KDIV,
+ 1,
+ ep93xx_adc_divisors,
+			  ARRAY_SIZE(ep93xx_adc_divisors));
+	if (ret)
+		return ret;
+
+	/*
+	 * On reset, both PDIV and VDIV are set to zero. PDIV zero means
+	 * the clock is disabled and VDIV should not be zero, so we set
+	 * both the video and i2s dividers to their minimum values.
+	 * ENA - enable CLK divider.
+	 * PDIV - 00 - disable clock.
+	 * VDIV - at least 2.
+	 */
+
+ /* Check and enable video clk registers */
+ regmap_read(priv->map, EP93XX_SYSCON_VIDCLKDIV, &value);
+ value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+ ep93xx_clk_write(priv, EP93XX_SYSCON_VIDCLKDIV, value);
+
+ /* Check and enable i2s clk registers */
+ regmap_read(priv->map, EP93XX_SYSCON_I2SCLKDIV, &value);
+ value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+
+ /*
+ * Override the SAI_MSTR_CLK_CFG from the I2S block and use the
+ * I2SClkDiv Register settings. LRCLK transitions on the falling SCLK
+ * edge.
+ */
+ value |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL;
+ ep93xx_clk_write(priv, EP93XX_SYSCON_I2SCLKDIV, value);
+
+ /* video clk */
+ idx = EP93XX_CLK_VIDEO - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_ddiv(clk, "ep93xx-fb",
+ ddiv_pdata, ARRAY_SIZE(ddiv_pdata),
+ EP93XX_SYSCON_VIDCLKDIV,
+				       EP93XX_SYSCON_CLKDIV_ENABLE);
+	if (ret)
+		return ret;
+
+ /* i2s clk */
+ idx = EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_ddiv(clk, "mclk",
+ ddiv_pdata, ARRAY_SIZE(ddiv_pdata),
+ EP93XX_SYSCON_I2SCLKDIV,
+				       EP93XX_SYSCON_CLKDIV_ENABLE);
+	if (ret)
+		return ret;
+
+ /* i2s sclk */
+ idx = EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ pdata.hw = &priv->reg[EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1].hw;
+ ret = ep93xx_register_div(clk, "sclk", &pdata,
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ 16, /* EP93XX_I2SCLKDIV_SDIV_SHIFT */
+ 1, /* EP93XX_I2SCLKDIV_SDIV_WIDTH */
+ ep93xx_sclk_divisors,
+				  ARRAY_SIZE(ep93xx_sclk_divisors));
+	if (ret)
+		return ret;
+
+ /* i2s lrclk */
+ idx = EP93XX_CLK_I2S_LRCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ pdata.hw = &priv->reg[EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1].hw;
+ ret = ep93xx_register_div(clk, "lrclk", &pdata,
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ 17, /* EP93XX_I2SCLKDIV_LRDIV32_SHIFT */
+ 2, /* EP93XX_I2SCLKDIV_LRDIV32_WIDTH */
+ ep93xx_lrclk_divisors,
+				  ARRAY_SIZE(ep93xx_lrclk_divisors));
+	if (ret)
+		return ret;
+
+	/* The IrDA clock uses the same pattern, but no init code was present in the original clock driver. */
+ return devm_of_clk_add_hw_provider(priv->dev, of_clk_ep93xx_get, priv);
+}
+
+static const struct auxiliary_device_id ep93xx_clk_ids[] = {
+ { .name = "soc_ep93xx.clk-ep93xx", .driver_data = 2, },
+ { .name = "soc_ep93xx.clk-ep93xx.e2", .driver_data = 1, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_clk_ids);
+
+static struct auxiliary_driver ep93xx_clk_driver = {
+ .probe = ep93xx_clk_probe,
+ .id_table = ep93xx_clk_ids,
+};
+module_auxiliary_driver(ep93xx_clk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikita Shubin <nikita.shubin@maquefel.me>");
+MODULE_DESCRIPTION("Clock control for Cirrus EP93xx chips");
diff --git a/drivers/clk/clk-eyeq.c b/drivers/clk/clk-eyeq.c
new file mode 100644
index 000000000000..ea1c3d78e7cd
--- /dev/null
+++ b/drivers/clk/clk-eyeq.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PLL clock driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
+ *
+ * This controller handles:
+ * - Read-only PLLs, all derived from the same main crystal clock.
+ * - Divider clocks, children of PLLs.
+ * - Fixed-factor clocks, children of PLLs.
+ *
+ * Parent clock is expected to be constant. This driver's registers live in a
+ * shared region called OLB. Some PLLs and fixed-factors are initialised early
+ * by of_clk_init(); if so, two clk providers are registered.
+ *
+ * We use eqc_ as prefix, as in "EyeQ Clock", but way shorter.
+ *
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+/*
+ * Set pr_fmt() for printing from eqc_early_init().
+ * It is called at of_clk_init() stage (read: really early).
+ */
+#define pr_fmt(fmt) "clk-eyeq: " fmt
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/clock/mobileye,eyeq5-clk.h>
+
+/* In fractional mode, enables the fractional noise canceling DAC. Otherwise, no function. */
+#define PCSR0_DAC_EN BIT(0)
+/* Fractional or integer mode */
+#define PCSR0_DSM_EN BIT(1)
+#define PCSR0_PLL_EN BIT(2)
+/* All clock outputs held at 0 */
+#define PCSR0_FOUTPOSTDIV_EN BIT(3)
+#define PCSR0_POST_DIV1 GENMASK(6, 4)
+#define PCSR0_POST_DIV2 GENMASK(9, 7)
+#define PCSR0_REF_DIV GENMASK(15, 10)
+#define PCSR0_INTIN GENMASK(27, 16)
+#define PCSR0_BYPASS BIT(28)
+/* Bits 30..29 are reserved */
+#define PCSR0_PLL_LOCKED BIT(31)
+
+#define PCSR1_RESET BIT(0)
+#define PCSR1_SSGC_DIV GENMASK(4, 1)
+/* Spread amplitude (% = 0.1 * SPREAD[4:0]) */
+#define PCSR1_SPREAD GENMASK(9, 5)
+#define PCSR1_DIS_SSCG BIT(10)
+/* Down-spread or center-spread */
+#define PCSR1_DOWN_SPREAD BIT(11)
+#define PCSR1_FRAC_IN GENMASK(31, 12)
+
+struct eqc_pll {
+ unsigned int index;
+ const char *name;
+ unsigned int reg64;
+};
+
+/*
+ * Divider clock. Divider is 2*(v+1), with v the register value.
+ * Min divider is 2, max is 2*(2^width).
+ */
+struct eqc_div {
+ unsigned int index;
+ const char *name;
+ unsigned int parent;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+};
+
+struct eqc_fixed_factor {
+ unsigned int index;
+ const char *name;
+ unsigned int mult;
+ unsigned int div;
+ unsigned int parent;
+};
+
+struct eqc_match_data {
+ unsigned int pll_count;
+ const struct eqc_pll *plls;
+
+ unsigned int div_count;
+ const struct eqc_div *divs;
+
+ unsigned int fixed_factor_count;
+ const struct eqc_fixed_factor *fixed_factors;
+
+ const char *reset_auxdev_name;
+ const char *pinctrl_auxdev_name;
+
+ unsigned int early_clk_count;
+};
+
+struct eqc_early_match_data {
+ unsigned int early_pll_count;
+ const struct eqc_pll *early_plls;
+
+ unsigned int early_fixed_factor_count;
+ const struct eqc_fixed_factor *early_fixed_factors;
+
+ /*
+	 * We want our of_xlate callback to return -EPROBE_DEFER instead
+	 * of dev_err() plus -EINVAL. For that, we must know the total
+	 * clock count.
+ */
+ unsigned int late_clk_count;
+};
+
+/*
+ * Both factors (mult and div) must fit in 32 bits. When an operation overflows,
+ * this function throws away low bits so that factors still fit in 32 bits.
+ *
+ * Precision loss depends on the magnitude of mult and div. The worst
+ * theoretical loss is (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10,
+ * which is about 1 Hz of error at 4.3 GHz.
+ */
+static void eqc_pll_downshift_factors(unsigned long *mult, unsigned long *div)
+{
+ unsigned long biggest;
+ unsigned int shift;
+
+ /* This function can be removed if mult/div switch to unsigned long. */
+ static_assert(sizeof_field(struct clk_fixed_factor, mult) == sizeof(unsigned int));
+ static_assert(sizeof_field(struct clk_fixed_factor, div) == sizeof(unsigned int));
+
+ /* No overflow, nothing to be done. */
+ if (*mult <= UINT_MAX && *div <= UINT_MAX)
+ return;
+
+ /*
+ * Compute the shift required to bring the biggest factor into unsigned
+ * int range. That is, shift its highest set bit to the unsigned int
+ * most significant bit.
+ */
+ biggest = max(*mult, *div);
+ shift = __fls(biggest) - (BITS_PER_BYTE * sizeof(unsigned int)) + 1;
+
+ *mult >>= shift;
+ *div >>= shift;
+}
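+
+/*
+ * Example: if *mult is 1ULL << 33, __fls() returns 33 and shift is
+ * 33 - 32 + 1 = 2; both factors drop their two low bits so the larger
+ * one fits the 32-bit clk_fixed_factor fields again.
+ */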
+
+static int eqc_pll_parse_registers(u32 r0, u32 r1, unsigned long *mult,
+ unsigned long *div, unsigned long *acc)
+{
+ u32 spread;
+
+ if (r0 & PCSR0_BYPASS) {
+ *mult = 1;
+ *div = 1;
+ *acc = 0;
+ return 0;
+ }
+
+ if (!(r0 & PCSR0_PLL_LOCKED))
+ return -EINVAL;
+
+ *mult = FIELD_GET(PCSR0_INTIN, r0);
+ *div = FIELD_GET(PCSR0_REF_DIV, r0);
+ if (r0 & PCSR0_FOUTPOSTDIV_EN)
+ *div *= FIELD_GET(PCSR0_POST_DIV1, r0) * FIELD_GET(PCSR0_POST_DIV2, r0);
+
+ /* Fractional mode, in 2^20 (0x100000) parts. */
+ if (r0 & PCSR0_DSM_EN) {
+ *div *= (1ULL << 20);
+ *mult = *mult * (1ULL << 20) + FIELD_GET(PCSR1_FRAC_IN, r1);
+ }
+
+ if (!*mult || !*div)
+ return -EINVAL;
+
+ if (r1 & (PCSR1_RESET | PCSR1_DIS_SSCG)) {
+ *acc = 0;
+ return 0;
+ }
+
+ /*
+ * Spread spectrum.
+ *
+ * Spread is 1/1000 parts of frequency, accuracy is half of
+ * that. To get accuracy, convert to ppb (parts per billion).
+ *
+ * acc = spread * 1e6 / 2
+ * with acc in parts per billion and,
+ * spread in parts per thousand.
+ */
+ spread = FIELD_GET(PCSR1_SPREAD, r1);
+ *acc = spread * 500000;
+
+ if (r1 & PCSR1_DOWN_SPREAD) {
+ /*
+ * Downspreading: the central frequency is half a
+ * spread lower.
+ */
+ *mult *= 2000 - spread;
+ *div *= 2000;
+
+ /*
+ * Previous operation might overflow 32 bits. If it
+ * does, throw away the least amount of low bits.
+ */
+ eqc_pll_downshift_factors(mult, div);
+ }
+
+ return 0;
+}
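+
+/*
+ * Example: SPREAD = 10 encodes a 1.0% spread, giving an accuracy of
+ * 10 * 500000 = 5000000 ppb (0.5%); with down-spread enabled, mult/div
+ * are additionally scaled by 1990/2000 to centre the frequency half a
+ * spread lower.
+ */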
+
+static void eqc_probe_init_plls(struct device *dev, const struct eqc_match_data *data,
+ void __iomem *base, struct clk_hw_onecell_data *cells)
+{
+ unsigned long mult, div, acc;
+ const struct eqc_pll *pll;
+ struct clk_hw *hw;
+ unsigned int i;
+ u32 r0, r1;
+ u64 val;
+ int ret;
+
+ for (i = 0; i < data->pll_count; i++) {
+ pll = &data->plls[i];
+
+ val = readq(base + pll->reg64);
+ r0 = val;
+ r1 = val >> 32;
+
+ ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
+ if (ret) {
+ dev_warn(dev, "failed parsing state of %s\n", pll->name);
+ cells->hws[pll->index] = ERR_PTR(ret);
+ continue;
+ }
+
+ hw = clk_hw_register_fixed_factor_with_accuracy_fwname(dev,
+ dev->of_node, pll->name, "ref", 0, mult, div, acc);
+ cells->hws[pll->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n", pll->name, hw);
+ }
+}
+
+static void eqc_probe_init_divs(struct device *dev, const struct eqc_match_data *data,
+ void __iomem *base, struct clk_hw_onecell_data *cells)
+{
+ struct clk_parent_data parent_data = { };
+ const struct eqc_div *div;
+ struct clk_hw *parent;
+ void __iomem *reg;
+ struct clk_hw *hw;
+ unsigned int i;
+
+ for (i = 0; i < data->div_count; i++) {
+ div = &data->divs[i];
+ reg = base + div->reg;
+ parent = cells->hws[div->parent];
+
+ if (IS_ERR(parent)) {
+ /* Parent is in early clk provider. */
+ parent_data.index = div->parent;
+ parent_data.hw = NULL;
+ } else {
+ /* Avoid clock lookup when we already have the hw reference. */
+ parent_data.index = 0;
+ parent_data.hw = parent;
+ }
+
+ hw = clk_hw_register_divider_table_parent_data(dev, div->name,
+ &parent_data, 0, reg, div->shift, div->width,
+ CLK_DIVIDER_EVEN_INTEGERS, NULL, NULL);
+ cells->hws[div->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n",
+ div->name, hw);
+ }
+}
+
+static void eqc_probe_init_fixed_factors(struct device *dev,
+ const struct eqc_match_data *data,
+ struct clk_hw_onecell_data *cells)
+{
+ const struct eqc_fixed_factor *ff;
+ struct clk_hw *hw, *parent_hw;
+ unsigned int i;
+
+ for (i = 0; i < data->fixed_factor_count; i++) {
+ ff = &data->fixed_factors[i];
+ parent_hw = cells->hws[ff->parent];
+
+ if (IS_ERR(parent_hw)) {
+ /* Parent is in early clk provider. */
+ hw = clk_hw_register_fixed_factor_index(dev, ff->name,
+ ff->parent, 0, ff->mult, ff->div);
+ } else {
+ /* Avoid clock lookup when we already have the hw reference. */
+ hw = clk_hw_register_fixed_factor_parent_hw(dev, ff->name,
+ parent_hw, 0, ff->mult, ff->div);
+ }
+
+ cells->hws[ff->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n",
+ ff->name, hw);
+ }
+}
+
+static void eqc_auxdev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int eqc_auxdev_create(struct device *dev, void __iomem *base,
+ const char *name, u32 id)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = name;
+ adev->dev.parent = dev;
+ adev->dev.platform_data = (void __force *)base;
+ adev->dev.release = eqc_auxdev_release;
+ adev->id = id;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ auxiliary_device_uninit(adev);
+
+ return ret;
+}
+
+static int eqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct eqc_match_data *data;
+ struct clk_hw_onecell_data *cells;
+ unsigned int i, clk_count;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ data = device_get_match_data(dev);
+ if (!data)
+		return 0; /* Neither clocks nor auxdevs; we are done. */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ /* Init optional reset auxiliary device. */
+ if (data->reset_auxdev_name) {
+ ret = eqc_auxdev_create(dev, base, data->reset_auxdev_name, 0);
+ if (ret)
+ dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
+ KBUILD_MODNAME, data->reset_auxdev_name, ret);
+ }
+
+ /* Init optional pinctrl auxiliary device. */
+ if (data->pinctrl_auxdev_name) {
+ ret = eqc_auxdev_create(dev, base, data->pinctrl_auxdev_name, 0);
+ if (ret)
+ dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
+ KBUILD_MODNAME, data->pinctrl_auxdev_name, ret);
+ }
+
+ if (data->pll_count + data->div_count + data->fixed_factor_count == 0)
+ return 0; /* Zero clocks, we are done. */
+
+ clk_count = data->pll_count + data->div_count +
+ data->fixed_factor_count + data->early_clk_count;
+ cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ cells->num = clk_count;
+
+ /* Early PLLs are marked as errors: the early provider will get queried. */
+ for (i = 0; i < clk_count; i++)
+ cells->hws[i] = ERR_PTR(-EINVAL);
+
+ eqc_probe_init_plls(dev, data, base, cells);
+
+ eqc_probe_init_divs(dev, data, base, cells);
+
+ eqc_probe_init_fixed_factors(dev, data, cells);
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
+}
+
+/* Required early for GIC timer (pll-cpu) and UARTs (pll-per). */
+static const struct eqc_pll eqc_eyeq5_early_plls[] = {
+ { .index = EQ5C_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
+ { .index = EQ5C_PLL_PER, .name = "pll-per", .reg64 = 0x05C },
+};
+
+static const struct eqc_pll eqc_eyeq5_plls[] = {
+ { .index = EQ5C_PLL_VMP, .name = "pll-vmp", .reg64 = 0x034 },
+ { .index = EQ5C_PLL_PMA, .name = "pll-pma", .reg64 = 0x03C },
+ { .index = EQ5C_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
+ { .index = EQ5C_PLL_DDR0, .name = "pll-ddr0", .reg64 = 0x04C },
+ { .index = EQ5C_PLL_PCI, .name = "pll-pci", .reg64 = 0x054 },
+ { .index = EQ5C_PLL_PMAC, .name = "pll-pmac", .reg64 = 0x064 },
+ { .index = EQ5C_PLL_MPC, .name = "pll-mpc", .reg64 = 0x06C },
+ { .index = EQ5C_PLL_DDR1, .name = "pll-ddr1", .reg64 = 0x074 },
+};
+
+enum {
+ /*
+ * EQ5C_PLL_CPU children.
+ * EQ5C_PER_OCC_PCI is the last clock exposed in dt-bindings.
+ */
+ EQ5C_CPU_OCC = EQ5C_PER_OCC_PCI + 1,
+ EQ5C_CPU_SI_CSS0,
+ EQ5C_CPU_CPC,
+ EQ5C_CPU_CM,
+ EQ5C_CPU_MEM,
+ EQ5C_CPU_OCC_ISRAM,
+ EQ5C_CPU_ISRAM,
+ EQ5C_CPU_OCC_DBU,
+ EQ5C_CPU_SI_DBU_TP,
+
+ /*
+ * EQ5C_PLL_VDI children.
+ */
+ EQ5C_VDI_OCC_VDI,
+ EQ5C_VDI_VDI,
+ EQ5C_VDI_OCC_CAN_SER,
+ EQ5C_VDI_CAN_SER,
+ EQ5C_VDI_I2C_SER,
+
+ /*
+ * EQ5C_PLL_PER children.
+ */
+ EQ5C_PER_PERIPH,
+ EQ5C_PER_CAN,
+ EQ5C_PER_TIMER,
+ EQ5C_PER_CCF,
+ EQ5C_PER_OCC_MJPEG,
+ EQ5C_PER_HSM,
+ EQ5C_PER_MJPEG,
+ EQ5C_PER_FCMU_A,
+};
+
+static const struct eqc_fixed_factor eqc_eyeq5_early_fixed_factors[] = {
+ /* EQ5C_PLL_CPU children */
+ { EQ5C_CPU_OCC, "occ-cpu", 1, 1, EQ5C_PLL_CPU },
+ { EQ5C_CPU_SI_CSS0, "si-css0", 1, 1, EQ5C_CPU_OCC },
+ { EQ5C_CPU_CORE0, "core0", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE1, "core1", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE2, "core2", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE3, "core3", 1, 1, EQ5C_CPU_SI_CSS0 },
+
+ /* EQ5C_PLL_PER children */
+ { EQ5C_PER_OCC, "occ-periph", 1, 16, EQ5C_PLL_PER },
+ { EQ5C_PER_UART, "uart", 1, 1, EQ5C_PER_OCC },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq5_fixed_factors[] = {
+ /* EQ5C_PLL_CPU children */
+ { EQ5C_CPU_CPC, "cpc", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CM, "cm", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_MEM, "mem", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_OCC_ISRAM, "occ-isram", 1, 2, EQ5C_PLL_CPU },
+ { EQ5C_CPU_ISRAM, "isram", 1, 1, EQ5C_CPU_OCC_ISRAM },
+ { EQ5C_CPU_OCC_DBU, "occ-dbu", 1, 10, EQ5C_PLL_CPU },
+ { EQ5C_CPU_SI_DBU_TP, "si-dbu-tp", 1, 1, EQ5C_CPU_OCC_DBU },
+
+ /* EQ5C_PLL_VDI children */
+ { EQ5C_VDI_OCC_VDI, "occ-vdi", 1, 2, EQ5C_PLL_VDI },
+ { EQ5C_VDI_VDI, "vdi", 1, 1, EQ5C_VDI_OCC_VDI },
+ { EQ5C_VDI_OCC_CAN_SER, "occ-can-ser", 1, 16, EQ5C_PLL_VDI },
+ { EQ5C_VDI_CAN_SER, "can-ser", 1, 1, EQ5C_VDI_OCC_CAN_SER },
+ { EQ5C_VDI_I2C_SER, "i2c-ser", 1, 20, EQ5C_PLL_VDI },
+
+ /* EQ5C_PLL_PER children */
+ { EQ5C_PER_PERIPH, "periph", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_CAN, "can", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_SPI, "spi", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_I2C, "i2c", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_TIMER, "timer", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_GPIO, "gpio", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_EMMC, "emmc-sys", 1, 10, EQ5C_PLL_PER },
+ { EQ5C_PER_CCF, "ccf-ctrl", 1, 4, EQ5C_PLL_PER },
+ { EQ5C_PER_OCC_MJPEG, "occ-mjpeg", 1, 2, EQ5C_PLL_PER },
+ { EQ5C_PER_HSM, "hsm", 1, 1, EQ5C_PER_OCC_MJPEG },
+ { EQ5C_PER_MJPEG, "mjpeg", 1, 1, EQ5C_PER_OCC_MJPEG },
+ { EQ5C_PER_FCMU_A, "fcmu-a", 1, 20, EQ5C_PLL_PER },
+ { EQ5C_PER_OCC_PCI, "occ-pci-sys", 1, 8, EQ5C_PLL_PER },
+};
+
+static const struct eqc_div eqc_eyeq5_divs[] = {
+ {
+ .index = EQ5C_DIV_OSPI,
+ .name = "div-ospi",
+ .parent = EQ5C_PLL_PER,
+ .reg = 0x11C,
+ .shift = 0,
+ .width = 4,
+ },
+};
+
+static const struct eqc_early_match_data eqc_eyeq5_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq5_early_plls),
+ .early_plls = eqc_eyeq5_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq5_early_fixed_factors,
+
+ .late_clk_count = ARRAY_SIZE(eqc_eyeq5_plls) + ARRAY_SIZE(eqc_eyeq5_divs) +
+ ARRAY_SIZE(eqc_eyeq5_fixed_factors),
+};
+
+static const struct eqc_match_data eqc_eyeq5_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq5_plls),
+ .plls = eqc_eyeq5_plls,
+
+ .div_count = ARRAY_SIZE(eqc_eyeq5_divs),
+ .divs = eqc_eyeq5_divs,
+
+ .fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_fixed_factors),
+ .fixed_factors = eqc_eyeq5_fixed_factors,
+
+ .reset_auxdev_name = "reset",
+ .pinctrl_auxdev_name = "pinctrl",
+
+ .early_clk_count = ARRAY_SIZE(eqc_eyeq5_early_plls) +
+ ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
+};
+
+static const struct eqc_pll eqc_eyeq6l_plls[] = {
+ { .index = EQ6LC_PLL_DDR, .name = "pll-ddr", .reg64 = 0x02C },
+ { .index = EQ6LC_PLL_CPU, .name = "pll-cpu", .reg64 = 0x034 }, /* also acc */
+ { .index = EQ6LC_PLL_PER, .name = "pll-per", .reg64 = 0x03C },
+ { .index = EQ6LC_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
+};
+
+static const struct eqc_match_data eqc_eyeq6l_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6l_plls),
+ .plls = eqc_eyeq6l_plls,
+
+ .reset_auxdev_name = "reset",
+};
+
+static const struct eqc_match_data eqc_eyeq6h_west_match_data = {
+ .reset_auxdev_name = "reset_west",
+};
+
+static const struct eqc_pll eqc_eyeq6h_east_plls[] = {
+ { .index = 0, .name = "pll-east", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_east_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_east_plls),
+ .plls = eqc_eyeq6h_east_plls,
+
+ .reset_auxdev_name = "reset_east",
+};
+
+static const struct eqc_pll eqc_eyeq6h_south_plls[] = {
+ { .index = EQ6HC_SOUTH_PLL_VDI, .name = "pll-vdi", .reg64 = 0x000 },
+ { .index = EQ6HC_SOUTH_PLL_PCIE, .name = "pll-pcie", .reg64 = 0x008 },
+ { .index = EQ6HC_SOUTH_PLL_PER, .name = "pll-per", .reg64 = 0x010 },
+ { .index = EQ6HC_SOUTH_PLL_ISP, .name = "pll-isp", .reg64 = 0x018 },
+};
+
+static const struct eqc_div eqc_eyeq6h_south_divs[] = {
+ {
+ .index = EQ6HC_SOUTH_DIV_EMMC,
+ .name = "div-emmc",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x070,
+ .shift = 4,
+ .width = 4,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_OSPI_REF,
+ .name = "div-ospi-ref",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x090,
+ .shift = 4,
+ .width = 4,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_OSPI_SYS,
+ .name = "div-ospi-sys",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x090,
+ .shift = 8,
+ .width = 1,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_TSU,
+ .name = "div-tsu",
+ .parent = EQ6HC_SOUTH_PLL_PCIE,
+ .reg = 0x098,
+ .shift = 4,
+ .width = 8,
+ },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_south_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_south_plls),
+ .plls = eqc_eyeq6h_south_plls,
+
+ .div_count = ARRAY_SIZE(eqc_eyeq6h_south_divs),
+ .divs = eqc_eyeq6h_south_divs,
+};
+
+static const struct eqc_pll eqc_eyeq6h_ddr0_plls[] = {
+ { .index = 0, .name = "pll-ddr0", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_ddr0_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr0_plls),
+ .plls = eqc_eyeq6h_ddr0_plls,
+};
+
+static const struct eqc_pll eqc_eyeq6h_ddr1_plls[] = {
+ { .index = 0, .name = "pll-ddr1", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_ddr1_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr1_plls),
+ .plls = eqc_eyeq6h_ddr1_plls,
+};
+
+static const struct eqc_pll eqc_eyeq6h_acc_plls[] = {
+ { .index = EQ6HC_ACC_PLL_XNN, .name = "pll-xnn", .reg64 = 0x040 },
+ { .index = EQ6HC_ACC_PLL_VMP, .name = "pll-vmp", .reg64 = 0x050 },
+ { .index = EQ6HC_ACC_PLL_PMA, .name = "pll-pma", .reg64 = 0x05C },
+ { .index = EQ6HC_ACC_PLL_MPC, .name = "pll-mpc", .reg64 = 0x068 },
+ { .index = EQ6HC_ACC_PLL_NOC, .name = "pll-noc", .reg64 = 0x070 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_acc_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_acc_plls),
+ .plls = eqc_eyeq6h_acc_plls,
+
+ .reset_auxdev_name = "reset_acc",
+};
+
+static const struct of_device_id eqc_match_table[] = {
+ { .compatible = "mobileye,eyeq5-olb", .data = &eqc_eyeq5_match_data },
+ { .compatible = "mobileye,eyeq6l-olb", .data = &eqc_eyeq6l_match_data },
+ { .compatible = "mobileye,eyeq6h-west-olb", .data = &eqc_eyeq6h_west_match_data },
+ { .compatible = "mobileye,eyeq6h-east-olb", .data = &eqc_eyeq6h_east_match_data },
+ { .compatible = "mobileye,eyeq6h-south-olb", .data = &eqc_eyeq6h_south_match_data },
+ { .compatible = "mobileye,eyeq6h-ddr0-olb", .data = &eqc_eyeq6h_ddr0_match_data },
+ { .compatible = "mobileye,eyeq6h-ddr1-olb", .data = &eqc_eyeq6h_ddr1_match_data },
+ { .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqc_eyeq6h_acc_match_data },
+ {}
+};
+
+static struct platform_driver eqc_driver = {
+ .probe = eqc_probe,
+ .driver = {
+ .name = "clk-eyeq",
+ .of_match_table = eqc_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(eqc_driver);
+
+/* Required early for GIC timer. */
+static const struct eqc_pll eqc_eyeq6h_central_early_plls[] = {
+ { .index = EQ6HC_CENTRAL_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq6h_central_early_fixed_factors[] = {
+ { EQ6HC_CENTRAL_CPU_OCC, "occ-cpu", 1, 1, EQ6HC_CENTRAL_PLL_CPU },
+};
+
+static const struct eqc_early_match_data eqc_eyeq6h_central_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_central_early_plls),
+ .early_plls = eqc_eyeq6h_central_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_central_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq6h_central_early_fixed_factors,
+};
+
+/* Required early for UART. */
+static const struct eqc_pll eqc_eyeq6h_west_early_plls[] = {
+ { .index = EQ6HC_WEST_PLL_PER, .name = "pll-west", .reg64 = 0x074 },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq6h_west_early_fixed_factors[] = {
+ { EQ6HC_WEST_PER_OCC, "west-per-occ", 1, 10, EQ6HC_WEST_PLL_PER },
+ { EQ6HC_WEST_PER_UART, "west-per-uart", 1, 1, EQ6HC_WEST_PER_OCC },
+};
+
+static const struct eqc_early_match_data eqc_eyeq6h_west_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_west_early_plls),
+ .early_plls = eqc_eyeq6h_west_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_west_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq6h_west_early_fixed_factors,
+};
+
+static void __init eqc_early_init(struct device_node *np,
+ const struct eqc_early_match_data *early_data)
+{
+ struct clk_hw_onecell_data *cells;
+ unsigned int i, clk_count;
+ void __iomem *base;
+ int ret;
+
+ clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count +
+ early_data->late_clk_count;
+ cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
+ if (!cells) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cells->num = clk_count;
+
+ /*
+ * Mark all clocks as deferred; some are registered here, the rest at
+ * platform device probe.
+ *
+ * Once the platform device is probed, its provider will take priority
+ * when looking up clocks.
+ */
+ for (i = 0; i < clk_count; i++)
+ cells->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+	/* Offsets (reg64) of early PLLs are relative to the OLB block. */
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < early_data->early_pll_count; i++) {
+ const struct eqc_pll *pll = &early_data->early_plls[i];
+ unsigned long mult, div, acc;
+ struct clk_hw *hw;
+ u32 r0, r1;
+ u64 val;
+
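+		/* reg64 packs two 32-bit config words: r0 is the low half, r1 the high. */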
+ val = readq(base + pll->reg64);
+ r0 = val;
+ r1 = val >> 32;
+
+ ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
+ if (ret) {
+ pr_err("failed parsing state of %s\n", pll->name);
+ goto err;
+ }
+
+ hw = clk_hw_register_fixed_factor_with_accuracy_fwname(NULL,
+ np, pll->name, "ref", 0, mult, div, acc);
+ cells->hws[pll->index] = hw;
+ if (IS_ERR(hw)) {
+ pr_err("failed registering %s: %pe\n", pll->name, hw);
+ ret = PTR_ERR(hw);
+ goto err;
+ }
+ }
+
+ for (i = 0; i < early_data->early_fixed_factor_count; i++) {
+ const struct eqc_fixed_factor *ff = &early_data->early_fixed_factors[i];
+ struct clk_hw *parent_hw = cells->hws[ff->parent];
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_factor_parent_hw(NULL, ff->name,
+ parent_hw, 0, ff->mult, ff->div);
+ cells->hws[ff->index] = hw;
+ if (IS_ERR(hw)) {
+ pr_err("failed registering %s: %pe\n", ff->name, hw);
+ ret = PTR_ERR(hw);
+ goto err;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
+ if (ret) {
+ pr_err("failed registering clk provider: %d\n", ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ /*
+ * We are doomed. The system will not be able to boot.
+ *
+	 * Let's still try to be good citizens by freeing resources and
+	 * printing a last error message that might help debugging.
+ */
+
+ pr_err("failed clk init: %d\n", ret);
+
+ if (cells) {
+ of_clk_del_provider(np);
+
+ for (i = 0; i < early_data->early_pll_count; i++) {
+ const struct eqc_pll *pll = &early_data->early_plls[i];
+ struct clk_hw *hw = cells->hws[pll->index];
+
+ if (!IS_ERR_OR_NULL(hw))
+ clk_hw_unregister_fixed_factor(hw);
+ }
+
+ kfree(cells);
+ }
+}
+
+static void __init eqc_eyeq5_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq5_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq5, "mobileye,eyeq5-olb", eqc_eyeq5_early_init);
+
+static void __init eqc_eyeq6h_central_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq6h_central_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_central, "mobileye,eyeq6h-central-olb",
+ eqc_eyeq6h_central_early_init);
+
+static void __init eqc_eyeq6h_west_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq6h_west_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_west, "mobileye,eyeq6h-west-olb",
+ eqc_eyeq6h_west_early_init);
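+
+/*
+ * Note: CLK_OF_DECLARE_DRIVER behaves like CLK_OF_DECLARE but does not mark
+ * the node as populated, so a platform device is still created for it and
+ * eqc_probe() can bind to the same OLB node later, filling in the clocks
+ * left as ERR_PTR(-EPROBE_DEFER) placeholders by the early init above.
+ */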
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index fe0500a1af3e..de658c9e4c53 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -30,19 +30,21 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
- best_parent = (rate / fix->mult) * fix->div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / fix->mult) * fix->div;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return (*prate / fix->div) * fix->mult;
+ req->rate = (req->best_parent_rate / fix->div) * fix->mult;
+
+ return 0;
}
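+
+/*
+ * Example: with mult = 1, div = 2 and CLK_SET_RATE_PARENT, a 50 MHz request
+ * asks the parent to round 100 MHz, and the result reported back is
+ * parent_rate / 2.
+ */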
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -50,7 +52,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
{
/*
* We must report success but we can do so unconditionally because
- * clk_factor_round_rate returns values that ensure this call is a
+ * clk_factor_determine_rate returns values that ensure this call is a
* nop.
*/
@@ -69,7 +71,7 @@ static unsigned long clk_factor_recalc_accuracy(struct clk_hw *hw,
}
const struct clk_ops clk_fixed_factor_ops = {
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
.recalc_accuracy = clk_factor_recalc_accuracy,
@@ -241,6 +243,17 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_with_accuracy_fwname);
+struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ const struct clk_parent_data pdata = { .index = index };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, &pdata,
+ flags, mult, div, 0, 0, false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_index);
+
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
@@ -405,7 +418,7 @@ static struct platform_driver of_fixed_factor_clk_driver = {
.of_match_table = of_fixed_factor_clk_ids,
},
.probe = of_fixed_factor_clk_probe,
- .remove_new = of_fixed_factor_clk_remove,
+ .remove = of_fixed_factor_clk_remove,
};
builtin_platform_driver(of_fixed_factor_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 0e08cb22c196..3bfcf4cd98a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -91,7 +91,7 @@ static struct platform_driver of_fixed_mmio_clk_driver = {
.of_match_table = of_fixed_mmio_clk_ids,
},
.probe = of_fixed_mmio_clk_probe,
- .remove_new = of_fixed_mmio_clk_remove,
+ .remove = of_fixed_mmio_clk_remove,
};
module_platform_driver(of_fixed_mmio_clk_driver);
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 3481eb8cdeb3..6b4f76b9c4da 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -232,7 +232,7 @@ static struct platform_driver of_fixed_clk_driver = {
.of_match_table = of_fixed_clk_ids,
},
.probe = of_fixed_clk_probe,
- .remove_new = of_fixed_clk_remove,
+ .remove = of_fixed_clk_remove,
};
builtin_platform_driver(of_fixed_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-rate_test.c b/drivers/clk/clk-fixed-rate_test.c
new file mode 100644
index 000000000000..0e04c10a21aa
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate_test.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for clk fixed rate basic type
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <kunit/clk.h>
+#include <kunit/of.h>
+#include <kunit/platform_device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+
+#include "clk-fixed-rate_test.h"
+
+/**
+ * struct clk_hw_fixed_rate_kunit_params - Parameters to pass to __clk_hw_register_fixed_rate()
+ * @dev: device registering clk
+ * @np: device_node of device registering clk
+ * @name: name of clk
+ * @parent_name: parent name of clk
+ * @parent_hw: clk_hw pointer to parent of clk
+ * @parent_data: parent_data describing parent of clk
+ * @flags: clk framework flags
+ * @fixed_rate: frequency of clk
+ * @fixed_accuracy: accuracy of clk
+ * @clk_fixed_flags: fixed rate specific clk flags
+ */
+struct clk_hw_fixed_rate_kunit_params {
+ struct device *dev;
+ struct device_node *np;
+ const char *name;
+ const char *parent_name;
+ const struct clk_hw *parent_hw;
+ const struct clk_parent_data *parent_data;
+ unsigned long flags;
+ unsigned long fixed_rate;
+ unsigned long fixed_accuracy;
+ unsigned long clk_fixed_flags;
+};
+
+static int
+clk_hw_register_fixed_rate_kunit_init(struct kunit_resource *res, void *context)
+{
+ struct clk_hw_fixed_rate_kunit_params *params = context;
+ struct clk_hw *hw;
+
+ hw = __clk_hw_register_fixed_rate(params->dev, params->np,
+ params->name,
+ params->parent_name,
+ params->parent_hw,
+ params->parent_data,
+ params->flags,
+ params->fixed_rate,
+ params->fixed_accuracy,
+ params->clk_fixed_flags,
+ false);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ res->data = hw;
+
+ return 0;
+}
+
+static void clk_hw_register_fixed_rate_kunit_exit(struct kunit_resource *res)
+{
+ struct clk_hw *hw = res->data;
+
+ clk_hw_unregister_fixed_rate(hw);
+}
+
+/**
+ * clk_hw_register_fixed_rate_kunit() - Test managed __clk_hw_register_fixed_rate()
+ * @test: The test context
+ * @params: Arguments to __clk_hw_register_fixed_rate()
+ *
+ * Return: Registered fixed rate clk_hw or ERR_PTR on failure
+ */
+static struct clk_hw *
+clk_hw_register_fixed_rate_kunit(struct kunit *test,
+ struct clk_hw_fixed_rate_kunit_params *params)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_alloc_resource(test,
+ clk_hw_register_fixed_rate_kunit_init,
+ clk_hw_register_fixed_rate_kunit_exit,
+ GFP_KERNEL, params);
+ if (!hw)
+ return ERR_PTR(-EINVAL);
+
+ return hw;
+}
+
+/**
+ * clk_hw_unregister_fixed_rate_kunit() - Test managed clk_hw_unregister_fixed_rate()
+ * @test: The test context
+ * @hw: fixed rate clk to unregister upon test completion
+ *
+ * Automatically unregister @hw when @test is complete via
+ * clk_hw_unregister_fixed_rate().
+ *
+ * Return: 0 on success or negative errno on failure
+ */
+static int clk_hw_unregister_fixed_rate_kunit(struct kunit *test, struct clk_hw *hw)
+{
+ if (!kunit_alloc_resource(test, NULL,
+ clk_hw_register_fixed_rate_kunit_exit,
+ GFP_KERNEL, hw))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Test that clk_get_rate() on a fixed rate clk registered with
+ * clk_hw_register_fixed_rate() gets the proper frequency.
+ */
+static void clk_fixed_rate_rate_test(struct kunit *test)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+ const unsigned long fixed_rate = 230000;
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", NULL, 0, fixed_rate);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, fixed_rate, clk_get_rate(clk));
+}
+
+/*
+ * Test that clk_get_accuracy() on a fixed rate clk registered via
+ * clk_hw_register_fixed_rate_with_accuracy() gets the proper accuracy.
+ */
+static void clk_fixed_rate_accuracy_test(struct kunit *test)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+ const unsigned long fixed_accuracy = 5000;
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate",
+ NULL, 0, 0,
+ fixed_accuracy);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, fixed_accuracy, clk_get_accuracy(clk));
+}
+
+/* Test suite for a fixed rate clk without any parent */
+static struct kunit_case clk_fixed_rate_test_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_rate_test),
+ KUNIT_CASE(clk_fixed_rate_accuracy_test),
+ {}
+};
+
+static struct kunit_suite clk_fixed_rate_suite = {
+ .name = "clk_fixed_rate",
+ .test_cases = clk_fixed_rate_test_cases,
+};
+
+/*
+ * Test that clk_get_parent() on a fixed rate clk gets the proper parent.
+ */
+static void clk_fixed_rate_parent_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *expected_parent, *actual_parent;
+ struct clk *clk;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ expected_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ actual_parent = clk_get_parent(clk);
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+/*
+ * Test that clk_get_rate() on a fixed rate clk ignores the parent rate.
+ */
+static void clk_fixed_rate_parent_rate_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *clk;
+ const unsigned long expected_rate = 1405;
+ const unsigned long parent_rate = 90402;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ .fixed_rate = parent_rate,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0,
+ expected_rate);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, expected_rate, clk_get_rate(clk));
+}
+
+/*
+ * Test that clk_get_accuracy() on a fixed rate clk ignores the parent accuracy.
+ */
+static void clk_fixed_rate_parent_accuracy_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *clk;
+ const unsigned long expected_accuracy = 900;
+ const unsigned long parent_accuracy = 24000;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ .fixed_accuracy = parent_accuracy,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate",
+ parent_name, 0, 0,
+ expected_accuracy);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, expected_accuracy, clk_get_accuracy(clk));
+}
+
+/* Test suite for a fixed rate clk with a parent */
+static struct kunit_case clk_fixed_rate_parent_test_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_parent_test),
+ KUNIT_CASE(clk_fixed_rate_parent_rate_test),
+ KUNIT_CASE(clk_fixed_rate_parent_accuracy_test),
+ {}
+};
+
+static struct kunit_suite clk_fixed_rate_parent_suite = {
+ .name = "clk_fixed_rate_parent",
+ .test_cases = clk_fixed_rate_parent_test_cases,
+};
+
+struct clk_fixed_rate_of_test_context {
+ struct device *dev;
+ struct platform_driver pdrv;
+ struct completion probed;
+};
+
+static inline struct clk_fixed_rate_of_test_context *
+pdev_to_clk_fixed_rate_of_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct clk_fixed_rate_of_test_context,
+ pdrv);
+}
+
+/*
+ * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper
+ * rate.
+ */
+static void clk_fixed_rate_of_probe_test(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx = test->priv;
+ struct device *dev = ctx->dev;
+ struct clk *clk;
+
+ clk = clk_get_kunit(test, dev, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
+ KUNIT_EXPECT_EQ(test, TEST_FIXED_FREQUENCY, clk_get_rate(clk));
+}
+
+/*
+ * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper
+ * accuracy.
+ */
+static void clk_fixed_rate_of_accuracy_test(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx = test->priv;
+ struct device *dev = ctx->dev;
+ struct clk *clk;
+
+ clk = clk_get_kunit(test, dev, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, TEST_FIXED_ACCURACY, clk_get_accuracy(clk));
+}
+
+static struct kunit_case clk_fixed_rate_of_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_of_probe_test),
+ KUNIT_CASE(clk_fixed_rate_of_accuracy_test),
+ {}
+};
+
+static int clk_fixed_rate_of_test_probe(struct platform_device *pdev)
+{
+ struct clk_fixed_rate_of_test_context *ctx;
+
+ ctx = pdev_to_clk_fixed_rate_of_test_context(pdev);
+ ctx->dev = &pdev->dev;
+ complete(&ctx->probed);
+
+ return 0;
+}
+
+static int clk_fixed_rate_of_init(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,single-clk-consumer" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_fixed_rate_test));
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ ctx->pdrv.probe = clk_fixed_rate_of_test_probe;
+ ctx->pdrv.driver.of_match_table = match_table;
+ ctx->pdrv.driver.name = __func__;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+ init_completion(&ctx->probed);
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&ctx->probed, HZ));
+
+ return 0;
+}
+
+static struct kunit_suite clk_fixed_rate_of_suite = {
+ .name = "clk_fixed_rate_of",
+ .init = clk_fixed_rate_of_init,
+ .test_cases = clk_fixed_rate_of_cases,
+};
+
+kunit_test_suites(
+ &clk_fixed_rate_suite,
+ &clk_fixed_rate_of_suite,
+ &clk_fixed_rate_parent_suite,
+);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for clk fixed rate basic type");
diff --git a/drivers/clk/clk-fixed-rate_test.h b/drivers/clk/clk-fixed-rate_test.h
new file mode 100644
index 000000000000..e0d28e5b6081
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate_test.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_FIXED_RATE_TEST_H
+#define _CLK_FIXED_RATE_TEST_H
+
+#define TEST_FIXED_FREQUENCY 50000000
+#define TEST_FIXED_ACCURACY 300
+
+#endif
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index da057172cc90..cd36a6e27f25 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -151,25 +151,32 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation);
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_fd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long m, n;
u64 ret;
- if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
- return *parent_rate;
+ if (!req->rate || (!clk_hw_can_set_rate_parent(hw) && req->rate >= req->best_parent_rate)) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (fd->approximation)
- fd->approximation(hw, rate, parent_rate, &m, &n);
+ fd->approximation(hw, req->rate, &req->best_parent_rate, &m, &n);
else
- clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
+ clk_fractional_divider_general_approximation(hw, req->rate,
+ &req->best_parent_rate,
+ &m, &n);
- ret = (u64)*parent_rate * m;
+ ret = (u64)req->best_parent_rate * m;
do_div(ret, n);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
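+
+/*
+ * Example: the output is parent_rate * m / n, so a 600 MHz parent with
+ * m = 1 and n = 3 gives 200 MHz.
+ */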
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -250,7 +257,7 @@ static void clk_fd_debug_init(struct clk_hw *hw, struct dentry *dentry)
const struct clk_ops clk_fractional_divider_ops = {
.recalc_rate = clk_fd_recalc_rate,
- .round_rate = clk_fd_round_rate,
+ .determine_rate = clk_fd_determine_rate,
.set_rate = clk_fd_set_rate,
#ifdef CONFIG_DEBUG_FS
.debug_init = clk_fd_debug_init,
diff --git a/drivers/clk/clk-fractional-divider_test.c b/drivers/clk/clk-fractional-divider_test.c
index 929eec927548..25fa35d89c1a 100644
--- a/drivers/clk/clk-fractional-divider_test.c
+++ b/drivers/clk/clk-fractional-divider_test.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Kunit test for clock fractional divider
+ * Kunit tests for clk fractional divider
*/
#include <linux/clk-provider.h>
#include <kunit/test.h>
@@ -144,4 +144,5 @@ static struct kunit_suite clk_fd_approximation_suite = {
kunit_test_suites(
&clk_fd_approximation_suite
);
+MODULE_DESCRIPTION("Kunit tests for clk fractional divider");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 68e585a02fd9..4746f8219132 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -15,7 +15,7 @@
#include <linux/string.h>
/**
- * DOC: basic gatable clock which can gate and ungate its output
+ * DOC: basic gateable clock which can gate and ungate its output
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
diff --git a/drivers/clk/clk-gate_test.c b/drivers/clk/clk-gate_test.c
index c96d93b19ddf..e276cd974750 100644
--- a/drivers/clk/clk-gate_test.c
+++ b/drivers/clk/clk-gate_test.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Kunit test for clk gate basic type
+ * Kunit tests for clk gate
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -461,4 +461,5 @@ kunit_test_suites(
&clk_gate_test_hiword_suite,
&clk_gate_test_enabled_suite
);
+MODULE_DESCRIPTION("Kunit tests for clk gate");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index 856b008e07c6..e94589c38568 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -126,13 +126,16 @@ static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
return 33000000;
}
-static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int gemini_pci_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* We support 33 and 66 MHz */
- if (rate < 48000000)
- return 33000000;
- return 66000000;
+ if (req->rate < 48000000)
+ req->rate = 33000000;
+ else
+ req->rate = 66000000;
+
+ return 0;
}
static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -179,7 +182,7 @@ static int gemini_pci_is_enabled(struct clk_hw *hw)
static const struct clk_ops gemini_pci_clk_ops = {
.recalc_rate = gemini_pci_recalc_rate,
- .round_rate = gemini_pci_round_rate,
+ .determine_rate = gemini_pci_determine_rate,
.set_rate = gemini_pci_set_rate,
.enable = gemini_pci_enable,
.disable = gemini_pci_disable,
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 5b114043771d..9099c57e2715 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -17,13 +17,15 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
/**
* DOC: basic gpio gated clock which can be enabled and disabled
* with gpio output
* Traits of this clock:
- * prepare - clk_(un)prepare only ensures parent is (un)prepared
- * enable - clk_enable and clk_disable are functional & control gpio
+ * prepare - clk_(un)prepare are functional and control a gpio that can sleep
 + * enable - clk_enable and clk_disable are functional & control a
 + *          non-sleeping gpio
* rate - inherits rate from parent. No clk_set_rate support
* parent - fixed parent. No clk_set_parent support
*/
@@ -199,7 +201,6 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
struct gpio_desc *gpiod;
struct clk_hw *hw;
bool is_mux;
- int ret;
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
@@ -211,17 +212,9 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
gpio_name = is_mux ? "select" : "enable";
gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret == -EPROBE_DEFER)
- pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n",
- node, __func__);
- else
- pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n",
- node, __func__,
- gpio_name);
- return ret;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod),
+ "Can't get '%s' named GPIO property\n", gpio_name);
if (is_mux)
hw = clk_hw_register_gpio_mux(dev, gpiod);
@@ -247,3 +240,187 @@ static struct platform_driver gpio_clk_driver = {
},
};
builtin_platform_driver(gpio_clk_driver);
+
+/**
+ * DOC: gated fixed clock, controlled with a gpio output and a regulator
+ * Traits of this clock:
+ * prepare - clk_prepare and clk_unprepare are functional & control the
+ *           regulator and, optionally, a gpio that can sleep
+ * enable - clk_enable and clk_disable are functional & control gpio
+ * rate - rate is fixed and set on clock registration
+ * parent - fixed clock is a root clock and has no parent
+ */
+
+/**
+ * struct clk_gated_fixed - Gateable fixed rate clock
+ * @clk_gpio: instance of clk_gpio for gate-gpio
+ * @supply: supply regulator
+ * @rate: fixed rate
+ */
+struct clk_gated_fixed {
+ struct clk_gpio clk_gpio;
+ struct regulator *supply;
+ unsigned long rate;
+};
+
+#define to_clk_gated_fixed(_clk_gpio) container_of(_clk_gpio, struct clk_gated_fixed, clk_gpio)
+
+static unsigned long clk_gated_fixed_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return to_clk_gated_fixed(to_clk_gpio(hw))->rate;
+}
+
+static int clk_gated_fixed_prepare(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return 0;
+
+ return regulator_enable(clk->supply);
+}
+
+static void clk_gated_fixed_unprepare(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return;
+
+ regulator_disable(clk->supply);
+}
+
+static int clk_gated_fixed_is_prepared(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return true;
+
+ return regulator_is_enabled(clk->supply);
+}
+
+/*
+ * Fixed gated clock with non-sleeping gpio.
+ *
+ * The prepare operation turns on the supply regulator and the enable
+ * operation switches the enable-gpio.
+ */
+static const struct clk_ops clk_gated_fixed_ops = {
+ .prepare = clk_gated_fixed_prepare,
+ .unprepare = clk_gated_fixed_unprepare,
+ .is_prepared = clk_gated_fixed_is_prepared,
+ .enable = clk_gpio_gate_enable,
+ .disable = clk_gpio_gate_disable,
+ .is_enabled = clk_gpio_gate_is_enabled,
+ .recalc_rate = clk_gated_fixed_recalc_rate,
+};
+
+static int clk_sleeping_gated_fixed_prepare(struct clk_hw *hw)
+{
+ int ret;
+
+ ret = clk_gated_fixed_prepare(hw);
+ if (ret)
+ return ret;
+
+ ret = clk_sleeping_gpio_gate_prepare(hw);
+ if (ret)
+ clk_gated_fixed_unprepare(hw);
+
+ return ret;
+}
+
+static void clk_sleeping_gated_fixed_unprepare(struct clk_hw *hw)
+{
+	clk_sleeping_gpio_gate_unprepare(hw);
+	clk_gated_fixed_unprepare(hw);
+}
+
+/*
+ * Fixed gated clock with sleeping gpio.
+ *
+ * Both enabling the supply regulator and switching the enable-gpio
+ * happen in the prepare step.
+ * is_prepared only needs to check the gpio state, as toggling the
+ * gpio is the last step when preparing.
+ */
+static const struct clk_ops clk_sleeping_gated_fixed_ops = {
+ .prepare = clk_sleeping_gated_fixed_prepare,
+ .unprepare = clk_sleeping_gated_fixed_unprepare,
+ .is_prepared = clk_sleeping_gpio_gate_is_prepared,
+ .recalc_rate = clk_gated_fixed_recalc_rate,
+};
+
+static int clk_gated_fixed_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_gated_fixed *clk;
+ const struct clk_ops *ops;
+ const char *clk_name;
+ u32 rate;
+ int ret;
+
+ clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "clock-frequency", &rate);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clock-frequency\n");
+ clk->rate = rate;
+
+ ret = device_property_read_string(dev, "clock-output-names", &clk_name);
+ if (ret)
+ clk_name = fwnode_get_name(dev->fwnode);
+
+ clk->supply = devm_regulator_get_optional(dev, "vdd");
+ if (IS_ERR(clk->supply)) {
+ if (PTR_ERR(clk->supply) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(clk->supply),
+ "Failed to get regulator\n");
+ clk->supply = NULL;
+ }
+
+ clk->clk_gpio.gpiod = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(clk->clk_gpio.gpiod))
+ return dev_err_probe(dev, PTR_ERR(clk->clk_gpio.gpiod),
+ "Failed to get gpio\n");
+
+ if (gpiod_cansleep(clk->clk_gpio.gpiod))
+ ops = &clk_sleeping_gated_fixed_ops;
+ else
+ ops = &clk_gated_fixed_ops;
+
+ clk->clk_gpio.hw.init = CLK_HW_INIT_NO_PARENT(clk_name, ops, 0);
+
+ /* register the clock */
+ ret = devm_clk_hw_register(dev, &clk->clk_gpio.hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &clk->clk_gpio.hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id gated_fixed_clk_match_table[] = {
+ { .compatible = "gated-fixed-clock" },
+ { /* sentinel */ }
+};
+
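+/*
+ * Illustrative device tree node; property names follow the probe code
+ * above (exact binding details live in the dt-bindings documentation):
+ *
+ *	clk48m: clock {
+ *		compatible = "gated-fixed-clock";
+ *		#clock-cells = <0>;
+ *		clock-frequency = <48000000>;
+ *		vdd-supply = <&reg_3v3>;
+ *		enable-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ *	};
+ */
+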
+static struct platform_driver gated_fixed_clk_driver = {
+ .probe = clk_gated_fixed_probe,
+ .driver = {
+ .name = "gated-fixed-clk",
+ .of_match_table = gated_fixed_clk_match_table,
+ },
+};
+builtin_platform_driver(gated_fixed_clk_driver);
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 6e68a41a70a1..cc583934ecf2 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -130,15 +130,17 @@ static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
*pdivf = divf;
}
-static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 divq, divf;
- unsigned long ref_freq = *parent_rate;
+ unsigned long ref_freq = req->best_parent_rate;
- clk_pll_calc(rate, ref_freq, &divq, &divf);
+ clk_pll_calc(req->rate, ref_freq, &divq, &divf);
- return (ref_freq * (divf + 1)) / (1 << divq);
+ req->rate = (ref_freq * (divf + 1)) / (1 << divq);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -185,7 +187,7 @@ static const struct clk_ops clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -227,16 +229,18 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_periclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
div++;
div &= ~0x1;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
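+
+/*
+ * Example: the divider is incremented and then forced even, so a 200 MHz
+ * parent with a 60 MHz request gives div = 3 -> 4, i.e. a 50 MHz result.
+ */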
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -255,7 +259,7 @@ static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
- .round_rate = clk_periclk_round_rate,
+ .determine_rate = clk_periclk_determine_rate,
.set_rate = clk_periclk_set_rate,
};
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 5d2a90addf1a..7d56a47c2aa7 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -197,8 +197,8 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hsdk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
unsigned long best_rate;
@@ -211,13 +211,15 @@ static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
@@ -265,7 +267,7 @@ static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
return -EINVAL;
/*
- * Program divider to div-by-1 if we succesfuly set core clock below
+ * Program divider to div-by-1 if we successfully set core clock below
* 500MHz threshold.
*/
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
@@ -296,7 +298,7 @@ static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
- .round_rate = hsdk_pll_round_rate,
+ .determine_rate = hsdk_pll_determine_rate,
.set_rate = hsdk_pll_set_rate,
};
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 870fd7df50c1..3c7a48c616bb 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -16,21 +16,26 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <dt-bindings/clock/microchip,lan966x.h>
-
#define GCK_ENA BIT(0)
#define GCK_SRC_SEL GENMASK(9, 8)
#define GCK_PRESCALER GENMASK(23, 16)
#define DIV_MAX 255
-static const char *clk_names[N_CLOCKS] = {
+static const char * const lan966x_clk_names[] = {
"qspi0", "qspi1", "qspi2", "sdmmc0",
"pi", "mcan0", "mcan1", "flexcom0",
"flexcom1", "flexcom2", "flexcom3",
"flexcom4", "timer1", "usb_refclk",
};
+static const char * const lan969x_clk_names[] = {
+ "qspi0", "qspi2", "sdmmc0", "sdmmc1",
+ "mcan0", "mcan1", "flexcom0",
+ "flexcom1", "flexcom2", "flexcom3",
+ "timer1", "usb_refclk",
+};
+
struct lan966x_gck {
struct clk_hw hw;
void __iomem *reg;
@@ -53,7 +58,7 @@ struct clk_gate_soc_desc {
int bit_idx;
};
-static const struct clk_gate_soc_desc clk_gate_desc[] = {
+static const struct clk_gate_soc_desc lan966x_clk_gate_desc[] = {
{ "uhphs", 11 },
{ "udphs", 10 },
{ "mcramc", 9 },
@@ -61,6 +66,37 @@ static const struct clk_gate_soc_desc clk_gate_desc[] = {
{ }
};
+static const struct clk_gate_soc_desc lan969x_clk_gate_desc[] = {
+ { "usb_drd", 10 },
+ { "mcramc", 9 },
+ { "hmatrix", 8 },
+ { }
+};
+
+struct lan966x_match_data {
+ char *name;
+ const char * const *clk_name;
+ const struct clk_gate_soc_desc *clk_gate_desc;
+ u8 num_generic_clks;
+ u8 num_total_clks;
+};
+
+static struct lan966x_match_data lan966x_desc = {
+ .name = "lan966x",
+ .clk_name = lan966x_clk_names,
+ .clk_gate_desc = lan966x_clk_gate_desc,
+ .num_total_clks = 18,
+ .num_generic_clks = 14,
+};
+
+static struct lan966x_match_data lan969x_desc = {
+ .name = "lan969x",
+ .clk_name = lan969x_clk_names,
+ .clk_gate_desc = lan969x_clk_gate_desc,
+ .num_total_clks = 15,
+ .num_generic_clks = 12,
+};
+
static DEFINE_SPINLOCK(clk_gate_lock);
static void __iomem *base;
@@ -186,24 +222,26 @@ static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i)
};
static int lan966x_gate_clk_register(struct device *dev,
+ const struct lan966x_match_data *data,
struct clk_hw_onecell_data *hw_data,
void __iomem *gate_base)
{
- int i;
+ for (int i = data->num_generic_clks; i < data->num_total_clks; ++i) {
+ int idx = i - data->num_generic_clks;
+ const struct clk_gate_soc_desc *desc;
- for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) {
- int idx = i - GCK_GATE_UHPHS;
+ desc = &data->clk_gate_desc[idx];
hw_data->hws[i] =
- devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
- "lan966x", 0, gate_base,
- clk_gate_desc[idx].bit_idx,
+ devm_clk_hw_register_gate(dev, desc->name,
+ data->name, 0, gate_base,
+ desc->bit_idx,
0, &clk_gate_lock);
if (IS_ERR(hw_data->hws[i]))
return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]),
"failed to register %s clock\n",
- clk_gate_desc[idx].name);
+ desc->name);
}
return 0;
@@ -211,13 +249,18 @@ static int lan966x_gate_clk_register(struct device *dev,
static int lan966x_clk_probe(struct platform_device *pdev)
{
+ const struct lan966x_match_data *data;
struct clk_hw_onecell_data *hw_data;
struct device *dev = &pdev->dev;
void __iomem *gate_base;
struct resource *res;
int i, ret;
- hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS),
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, data->num_total_clks),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
@@ -228,10 +271,10 @@ static int lan966x_clk_probe(struct platform_device *pdev)
init.ops = &lan966x_gck_ops;
- hw_data->num = GCK_GATE_UHPHS;
+ hw_data->num = data->num_generic_clks;
- for (i = 0; i < GCK_GATE_UHPHS; i++) {
- init.name = clk_names[i];
+ for (i = 0; i < data->num_generic_clks; i++) {
+ init.name = data->clk_name[i];
hw_data->hws[i] = lan966x_gck_clk_register(dev, i);
if (IS_ERR(hw_data->hws[i])) {
dev_err(dev, "failed to register %s clock\n",
@@ -246,9 +289,9 @@ static int lan966x_clk_probe(struct platform_device *pdev)
if (IS_ERR(gate_base))
return PTR_ERR(gate_base);
- hw_data->num = N_CLOCKS;
+ hw_data->num = data->num_total_clks;
- ret = lan966x_gate_clk_register(dev, hw_data, gate_base);
+ ret = lan966x_gate_clk_register(dev, data, hw_data, gate_base);
if (ret)
return ret;
}
@@ -257,7 +300,8 @@ static int lan966x_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id lan966x_clk_dt_ids[] = {
- { .compatible = "microchip,lan966x-gck", },
+ { .compatible = "microchip,lan966x-gck", .data = &lan966x_desc },
+ { .compatible = "microchip,lan9691-gck", .data = &lan969x_desc },
{ }
};
MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids);
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 99b271c1278a..b2107b31efa2 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -375,7 +375,7 @@ static unsigned long lmk04832_vco_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
- const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
+ static const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
unsigned int pll2_n, p, pll2_r;
unsigned int pll2_misc;
unsigned long vco_rate;
@@ -491,28 +491,33 @@ static long lmk04832_calc_pll2_params(unsigned long prate, unsigned long rate,
return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r);
}
-static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int ret;
- ret = lmk04832_check_vco_ranges(lmk, rate);
+ ret = lmk04832_check_vco_ranges(lmk, req->rate);
if (ret < 0)
return ret;
- vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
+ vco_rate = lmk04832_calc_pll2_params(req->best_parent_rate, req->rate,
+ &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "PLL2 parameters out of range\n");
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
- if (rate != vco_rate)
+ if (req->rate != vco_rate)
return -EINVAL;
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -579,7 +584,7 @@ static const struct clk_ops lmk04832_vco_ops = {
.prepare = lmk04832_vco_prepare,
.unprepare = lmk04832_vco_unprepare,
.recalc_rate = lmk04832_vco_recalc_rate,
- .round_rate = lmk04832_vco_round_rate,
+ .determine_rate = lmk04832_vco_determine_rate,
.set_rate = lmk04832_vco_set_rate,
};
@@ -637,7 +642,7 @@ static int lmk04832_register_vco(struct lmk04832 *lmk)
static int lmk04832_clkout_set_ddly(struct lmk04832 *lmk, int id)
{
- const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
+ static const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
unsigned int sclkx_y_ddly = 10;
unsigned int dclkx_y_ddly;
unsigned int dclkx_y_div;
@@ -888,25 +893,27 @@ static unsigned long lmk04832_sclk_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(prate, sysref_div);
}
-static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned long sclk_rate;
unsigned int sysref_div;
- sysref_div = DIV_ROUND_CLOSEST(*prate, rate);
- sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div);
+ sysref_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ sclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, sysref_div);
if (sysref_div < 0x07 || sysref_div > 0x1fff) {
dev_err(lmk->dev, "SYSREF divider out of range\n");
return -EINVAL;
}
- if (rate != sclk_rate)
+ if (req->rate != sclk_rate)
return -EINVAL;
- return sclk_rate;
+ req->rate = sclk_rate;
+
+ return 0;
}
static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -945,7 +952,7 @@ static const struct clk_ops lmk04832_sclk_ops = {
.prepare = lmk04832_sclk_prepare,
.unprepare = lmk04832_sclk_unprepare,
.recalc_rate = lmk04832_sclk_recalc_rate,
- .round_rate = lmk04832_sclk_round_rate,
+ .determine_rate = lmk04832_sclk_determine_rate,
.set_rate = lmk04832_sclk_set_rate,
};
@@ -1069,26 +1076,28 @@ static unsigned long lmk04832_dclk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned long dclk_rate;
unsigned int dclk_div;
- dclk_div = DIV_ROUND_CLOSEST(*prate, rate);
- dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div);
+ dclk_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ dclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, dclk_div);
if (dclk_div < 1 || dclk_div > 0x3ff) {
dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
return -EINVAL;
}
- if (rate != dclk_rate)
+ if (req->rate != dclk_rate)
return -EINVAL;
- return dclk_rate;
+ req->rate = dclk_rate;
+
+ return 0;
}
static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1158,7 +1167,7 @@ static const struct clk_ops lmk04832_dclk_ops = {
.prepare = lmk04832_dclk_prepare,
.unprepare = lmk04832_dclk_unprepare,
.recalc_rate = lmk04832_dclk_recalc_rate,
- .round_rate = lmk04832_dclk_round_rate,
+ .determine_rate = lmk04832_dclk_determine_rate,
.set_rate = lmk04832_dclk_set_rate,
};
@@ -1405,16 +1414,12 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->dev = &spi->dev;
- lmk->oscin = devm_clk_get(lmk->dev, "oscin");
+ lmk->oscin = devm_clk_get_enabled(lmk->dev, "oscin");
if (IS_ERR(lmk->oscin)) {
dev_err(lmk->dev, "failed to get oscin clock\n");
return PTR_ERR(lmk->oscin);
}
- ret = clk_prepare_enable(lmk->oscin);
- if (ret)
- return ret;
-
lmk->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
GPIOD_OUT_LOW);
@@ -1422,14 +1427,14 @@ static int lmk04832_probe(struct spi_device *spi)
sizeof(struct lmk_dclk), GFP_KERNEL);
if (!lmk->dclk) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
lmk->clkout = devm_kcalloc(lmk->dev, info->num_channels,
sizeof(*lmk->clkout), GFP_KERNEL);
if (!lmk->clkout) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
lmk->clk_data = devm_kzalloc(lmk->dev, struct_size(lmk->clk_data, hws,
@@ -1437,7 +1442,7 @@ static int lmk04832_probe(struct spi_device *spi)
GFP_KERNEL);
if (!lmk->clk_data) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
device_property_read_u32(lmk->dev, "ti,vco-hz", &lmk->vco_rate);
@@ -1465,7 +1470,7 @@ static int lmk04832_probe(struct spi_device *spi)
dev_err(lmk->dev, "missing reg property in child: %s\n",
child->full_name);
of_node_put(child);
- goto err_disable_oscin;
+ return ret;
}
of_property_read_u32(child, "ti,clkout-fmt",
@@ -1486,7 +1491,7 @@ static int lmk04832_probe(struct spi_device *spi)
__func__, PTR_ERR(lmk->regmap));
ret = PTR_ERR(lmk->regmap);
- goto err_disable_oscin;
+ return ret;
}
regmap_write(lmk->regmap, LMK04832_REG_RST3W, LMK04832_BIT_RESET);
@@ -1496,7 +1501,7 @@ static int lmk04832_probe(struct spi_device *spi)
&rdbk_pin);
ret = lmk04832_set_spi_rdbk(lmk, rdbk_pin);
if (ret)
- goto err_disable_oscin;
+ return ret;
}
regmap_bulk_read(lmk->regmap, LMK04832_REG_ID_PROD_MSB, &tmp, 3);
@@ -1504,13 +1509,13 @@ static int lmk04832_probe(struct spi_device *spi)
dev_err(lmk->dev, "unsupported device type: pid 0x%04x, maskrev 0x%02x\n",
tmp[0] << 8 | tmp[1], tmp[2]);
ret = -EINVAL;
- goto err_disable_oscin;
+ return ret;
}
ret = lmk04832_register_vco(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init device clock path\n");
- goto err_disable_oscin;
+ return ret;
}
if (lmk->vco_rate) {
@@ -1518,21 +1523,21 @@ static int lmk04832_probe(struct spi_device *spi)
ret = clk_set_rate(lmk->vco.clk, lmk->vco_rate);
if (ret) {
dev_err(lmk->dev, "failed to set VCO rate\n");
- goto err_disable_oscin;
+ return ret;
}
}
ret = lmk04832_register_sclk(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init SYNC/SYSREF clock path\n");
- goto err_disable_oscin;
+ return ret;
}
for (i = 0; i < info->num_channels; i++) {
ret = lmk04832_register_clkout(lmk, i);
if (ret) {
dev_err(lmk->dev, "failed to register clk %d\n", i);
- goto err_disable_oscin;
+ return ret;
}
}
@@ -1541,24 +1546,12 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->clk_data);
if (ret) {
dev_err(lmk->dev, "failed to add provider (%d)\n", ret);
- goto err_disable_oscin;
+ return ret;
}
spi_set_drvdata(spi, lmk);
return 0;
-
-err_disable_oscin:
- clk_disable_unprepare(lmk->oscin);
-
- return ret;
-}
-
-static void lmk04832_remove(struct spi_device *spi)
-{
- struct lmk04832 *lmk = spi_get_drvdata(spi);
-
- clk_disable_unprepare(lmk->oscin);
}
static const struct spi_device_id lmk04832_id[] = {
@@ -1579,7 +1572,6 @@ static struct spi_driver lmk04832_driver = {
.of_match_table = lmk04832_of_id,
},
.probe = lmk04832_probe,
- .remove = lmk04832_remove,
.id_table = lmk04832_id,
};
module_spi_driver(lmk04832_driver);
diff --git a/drivers/clk/clk-loongson1.c b/drivers/clk/clk-loongson1.c
index a3467aa6790f..f9f060d08a5f 100644
--- a/drivers/clk/clk-loongson1.c
+++ b/drivers/clk/clk-loongson1.c
@@ -93,14 +93,16 @@ static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
d->flags, d->width);
}
-static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ls1x_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
- return divider_round_rate(hw, rate, prate, d->table,
- d->width, d->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ d->table, d->width, d->flags);
+
+ return 0;
}
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -146,7 +148,7 @@ static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ls1x_clk_divider_ops = {
.recalc_rate = ls1x_divider_recalc_rate,
- .round_rate = ls1x_divider_round_rate,
+ .determine_rate = ls1x_divider_determine_rate,
.set_rate = ls1x_divider_set_rate,
};
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 820bb1e9e3b7..9c4c6c99db3e 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -13,10 +13,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
-static const struct clk_parent_data pdata[] = {
- { .fw_name = "ref_100m", },
-};
-
enum loongson2_clk_type {
CLK_TYPE_PLL,
CLK_TYPE_SCALE,
@@ -29,8 +25,10 @@ enum loongson2_clk_type {
struct loongson2_clk_provider {
void __iomem *base;
struct device *dev;
- struct clk_hw_onecell_data clk_data;
spinlock_t clk_lock; /* protect access to DIV registers */
+
+	/* Must be last -- ends in a flexible-array member. */
+ struct clk_hw_onecell_data clk_data;
};
struct loongson2_clk_data {
@@ -40,6 +38,7 @@ struct loongson2_clk_data {
u8 div_width;
u8 mult_shift;
u8 mult_width;
+ u8 bit_idx;
};
struct loongson2_clk_board_info {
@@ -48,6 +47,7 @@ struct loongson2_clk_board_info {
const char *name;
const char *parent_name;
unsigned long fixed_rate;
+ unsigned long flags;
u8 reg_offset;
u8 div_shift;
u8 div_width;
@@ -93,6 +93,19 @@ struct loongson2_clk_board_info {
.div_width = _dwidth, \
}
+#define CLK_SCALE_MODE(_id, _name, _pname, _offset, \
+ _dshift, _dwidth, _midx) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_SCALE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ .bit_idx = _midx + 1, \
+ }
+
#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \
{ \
.id = _id, \
@@ -103,6 +116,18 @@ struct loongson2_clk_board_info {
.bit_idx = _bidx, \
}
+#define CLK_GATE_FLAGS(_id, _name, _pname, _offset, _bidx, \
+ _flags) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .bit_idx = _bidx, \
+ .flags = _flags \
+ }
+
#define CLK_FIXED(_id, _name, _pname, _rate) \
{ \
.id = _id, \
@@ -112,6 +137,51 @@ struct loongson2_clk_board_info {
.fixed_rate = _rate, \
}
+static const struct loongson2_clk_board_info ls2k0300_clks[] = {
+ /* Reference Clock */
+ CLK_PLL(LS2K0300_NODE_PLL, "pll_node", 0x00, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_DDR_PLL, "pll_ddr", 0x08, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_PIX_PLL, "pll_pix", 0x10, 15, 9, 8, 7),
+ CLK_FIXED(LS2K0300_CLK_STABLE, "clk_stable", NULL, 100000000),
+ CLK_FIXED(LS2K0300_CLK_THSENS, "clk_thsens", NULL, 10000000),
+ /* Node PLL */
+ CLK_DIV(LS2K0300_CLK_NODE_DIV, "clk_node_div", "pll_node", 0x00, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMAC_DIV, "clk_gmac_div", "pll_node", 0x04, 0, 7),
+ CLK_DIV(LS2K0300_CLK_I2S_DIV, "clk_i2s_div", "pll_node", 0x04, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NODE_PLL_GATE, "clk_node_pll_gate", "clk_node_div", 0x00, 0),
+ CLK_GATE(LS2K0300_CLK_GMAC_GATE, "clk_gmac_gate", "clk_gmac_div", 0x00, 1),
+ CLK_GATE(LS2K0300_CLK_I2S_GATE, "clk_i2s_gate", "clk_i2s_div", 0x00, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_NODE_GATE, "clk_node_gate", "clk_node_scale", 0x24, 0,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_NODE_SCALE, "clk_node_scale", "clk_node_pll_gate", 0x20, 0, 3,
+ 3),
+ /* DDR PLL */
+ CLK_DIV(LS2K0300_CLK_DDR_DIV, "clk_ddr_div", "pll_ddr", 0x08, 24, 7),
+ CLK_DIV(LS2K0300_CLK_NET_DIV, "clk_net_div", "pll_ddr", 0x0c, 0, 7),
+ CLK_DIV(LS2K0300_CLK_DEV_DIV, "clk_dev_div", "pll_ddr", 0x0c, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NET_GATE, "clk_net_gate", "clk_net_div", 0x08, 1),
+ CLK_GATE(LS2K0300_CLK_DEV_GATE, "clk_dev_gate", "clk_dev_div", 0x08, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_DDR_GATE, "clk_ddr_gate", "clk_ddr_div", 0x08, 0,
+ CLK_IS_CRITICAL),
+ /* PIX PLL */
+ CLK_DIV(LS2K0300_CLK_PIX_DIV, "clk_pix_div", "pll_pix", 0x10, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMACBP_DIV, "clk_gmacbp_div", "pll_pix", 0x14, 0, 7),
+ CLK_GATE(LS2K0300_CLK_PIX_PLL_GATE, "clk_pix_pll_gate", "clk_pix_div", 0x10, 0),
+ CLK_GATE(LS2K0300_CLK_PIX_GATE, "clk_pix_gate", "clk_pix_scale", 0x24, 6),
+ CLK_GATE(LS2K0300_CLK_GMACBP_GATE, "clk_gmacbp_gate", "clk_gmacbp_div", 0x10, 1),
+ CLK_SCALE_MODE(LS2K0300_CLK_PIX_SCALE, "clk_pix_scale", "clk_pix_pll_gate", 0x20, 4, 3, 7),
+ /* clk_dev_gate */
+ CLK_DIV(LS2K0300_CLK_SDIO_SCALE, "clk_sdio_scale", "clk_dev_gate", 0x20, 24, 4),
+ CLK_GATE(LS2K0300_CLK_USB_GATE, "clk_usb_gate", "clk_usb_scale", 0x24, 2),
+ CLK_GATE(LS2K0300_CLK_SDIO_GATE, "clk_sdio_gate", "clk_sdio_scale", 0x24, 4),
+ CLK_GATE(LS2K0300_CLK_APB_GATE, "clk_apb_gate", "clk_apb_scale", 0x24, 3),
+ CLK_GATE_FLAGS(LS2K0300_CLK_BOOT_GATE, "clk_boot_gate", "clk_boot_scale", 0x24, 1,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_USB_SCALE, "clk_usb_scale", "clk_dev_gate", 0x20, 12, 3, 15),
+ CLK_SCALE_MODE(LS2K0300_CLK_APB_SCALE, "clk_apb_scale", "clk_dev_gate", 0x20, 16, 3, 19),
+ CLK_SCALE_MODE(LS2K0300_CLK_BOOT_SCALE, "clk_boot_scale", "clk_dev_gate", 0x20, 8, 3, 11),
+};
+
static const struct loongson2_clk_board_info ls2k0500_clks[] = {
CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6),
CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6),
@@ -228,20 +298,26 @@ static const struct clk_ops loongson2_pll_recalc_ops = {
static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u64 val, mult;
+ u64 val, scale;
+ u32 mode = 0;
struct loongson2_clk_data *clk = to_loongson2_clk(hw);
val = readq(clk->reg);
- mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+ scale = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+
+ if (clk->bit_idx)
+ mode = val & BIT(clk->bit_idx - 1);
- return div_u64((u64)parent_rate * mult, 8);
+ return mode == 0 ? div_u64((u64)parent_rate * scale, 8) :
+ div_u64((u64)parent_rate, scale);
}
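+
+/*
+ * Example: a register field value of 3 gives scale = 4; a 100 MHz parent
+ * then reads back as 100 * 4 / 8 = 50 MHz in multiply mode, or
+ * 100 / 4 = 25 MHz when the mode bit selects division.
+ */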
static const struct clk_ops loongson2_freqscale_recalc_ops = {
.recalc_rate = loongson2_freqscale_recalc_rate,
};
-static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
+static struct clk_hw *loongson2_clk_register(const char *parent,
+ struct loongson2_clk_provider *clp,
const struct loongson2_clk_board_info *cld,
const struct clk_ops *ops)
{
@@ -258,17 +334,14 @@ static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
init.ops = ops;
init.flags = 0;
init.num_parents = 1;
-
- if (!cld->parent_name)
- init.parent_data = pdata;
- else
- init.parent_names = &cld->parent_name;
+ init.parent_names = &parent;
clk->reg = clp->base + cld->reg_offset;
clk->div_shift = cld->div_shift;
clk->div_width = cld->div_width;
clk->mult_shift = cld->mult_shift;
clk->mult_width = cld->mult_width;
+ clk->bit_idx = cld->bit_idx;
clk->hw.init = &init;
hw = &clk->hw;
@@ -286,13 +359,19 @@ static int loongson2_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct loongson2_clk_provider *clp;
const struct loongson2_clk_board_info *p, *data;
+ const char *refclk_name, *parent_name;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
+ refclk_name = of_clk_get_parent_name(dev->of_node, 0);
+	if (!refclk_name)
+		return dev_err_probe(dev, -EINVAL,
+				     "failed to get refclk name\n");
+
for (p = data; p->name; p++)
- clks_num++;
+ clks_num = max(clks_num, p->id + 1);
clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
GFP_KERNEL);
@@ -304,37 +383,44 @@ static int loongson2_clk_probe(struct platform_device *pdev)
return PTR_ERR(clp->base);
spin_lock_init(&clp->clk_lock);
- clp->clk_data.num = clks_num + 1;
+ clp->clk_data.num = clks_num;
clp->dev = dev;
+ /* Avoid returning NULL for unused id */
+ memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
+
for (i = 0; i < clks_num; i++) {
p = &data[i];
+ parent_name = p->parent_name ? p->parent_name : refclk_name;
+
switch (p->type) {
case CLK_TYPE_PLL:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_pll_recalc_ops);
break;
case CLK_TYPE_SCALE:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_freqscale_recalc_ops);
break;
case CLK_TYPE_DIVIDER:
hw = devm_clk_hw_register_divider(dev, p->name,
- p->parent_name, 0,
+ parent_name, 0,
clp->base + p->reg_offset,
p->div_shift, p->div_width,
- CLK_DIVIDER_ONE_BASED,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
&clp->clk_lock);
break;
case CLK_TYPE_GATE:
- hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0,
+ hw = devm_clk_hw_register_gate(dev, p->name, parent_name,
+ p->flags,
clp->base + p->reg_offset,
p->bit_idx, 0,
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate(dev, p->name, parent_name,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
@@ -352,6 +438,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id loongson2_clk_match_table[] = {
+ { .compatible = "loongson,ls2k0300-clk", .data = &ls2k0300_clks },
{ .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks },
{ .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks },
{ .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks },
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
index be9020b6c789..0515e3e41162 100644
--- a/drivers/clk/clk-max9485.c
+++ b/drivers/clk/clk-max9485.c
@@ -159,29 +159,32 @@ static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int max9485_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
const struct max9485_rate *curr, *prev = NULL;
for (curr = max9485_rates; curr->out != 0; curr++) {
/* Exact matches */
- if (curr->out == rate)
- return rate;
+ if (curr->out == req->rate)
+ return 0;
/*
* Find the first entry that has a frequency higher than the
* requested one.
*/
- if (curr->out > rate) {
+ if (curr->out > req->rate) {
unsigned int mid;
/*
* If this is the first entry, clamp the value to the
* lowest possible frequency.
*/
- if (!prev)
- return curr->out;
+ if (!prev) {
+ req->rate = curr->out;
+
+ return 0;
+ }
/*
* Otherwise, determine whether the previous entry or
@@ -189,14 +192,18 @@ static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
*/
mid = prev->out + ((curr->out - prev->out) / 2);
- return (mid > rate) ? prev->out : curr->out;
+ req->rate = mid > req->rate ? prev->out : curr->out;
+
+ return 0;
}
prev = curr;
}
/* If the last entry was still too high, clamp the value */
- return prev->out;
+ req->rate = prev->out;
+
+ return 0;
}
struct max9485_clk {
@@ -221,7 +228,7 @@ static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
.parent_index = -1,
.ops = {
.set_rate = max9485_clkout_set_rate,
- .round_rate = max9485_clkout_round_rate,
+ .determine_rate = max9485_clkout_determine_rate,
.recalc_rate = max9485_clkout_recalc_rate,
},
},
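
This is the first of several mechanical .round_rate to .determine_rate conversions in this series: .round_rate returned the chosen rate directly, while .determine_rate stores it in the clk_rate_request and returns a status code. A generic template (foo_* and pick_rate() are invented names):

	/* Before: */
	static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
	{
		return pick_rate(rate, *parent_rate);
	}

	/* After: same selection logic, result carried in the request. */
	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		req->rate = pick_rate(req->rate, req->best_parent_rate);

		return 0;
	}

The milbeaut and clk-multiplier hunks below follow the same shape, with *parent_rate mapping to req->best_parent_rate.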
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
index 18c20aff45f7..b4f9b7143eaa 100644
--- a/drivers/clk/clk-milbeaut.c
+++ b/drivers/clk/clk-milbeaut.c
@@ -386,8 +386,8 @@ static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int m10v_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
@@ -398,13 +398,19 @@ static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -450,7 +456,7 @@ static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops m10v_clk_divider_ops = {
.recalc_rate = m10v_clk_divider_recalc_rate,
- .round_rate = m10v_clk_divider_round_rate,
+ .determine_rate = m10v_clk_divider_determine_rate,
.set_rate = m10v_clk_divider_set_rate,
};
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index e507aa958da9..6f2955d408b6 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -112,14 +112,16 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return bestmult;
}
-static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_multiplier_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
- unsigned long factor = __bestmult(hw, rate, parent_rate,
+ unsigned long factor = __bestmult(hw, req->rate, &req->best_parent_rate,
mult->width, mult->flags);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -150,7 +152,7 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_multiplier_ops = {
.recalc_rate = clk_multiplier_recalc_rate,
- .round_rate = clk_multiplier_round_rate,
+ .determine_rate = clk_multiplier_determine_rate,
.set_rate = clk_multiplier_set_rate,
};
EXPORT_SYMBOL_GPL(clk_multiplier_ops);
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 06245681dac7..fc0aeb4247f2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/reboot.h>
/*
@@ -116,9 +117,9 @@ static void __init nomadik_src_init(void)
val = readl(src_base + SRC_XTALCR);
pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ str_disabled_enabled(val & SRC_XTALCR_SXTALDIS));
pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));
if (of_property_read_bool(np, "disable-sxtalo")) {
/* The machine uses an external oscillator circuit */
val |= SRC_XTALCR_SXTALDIS;
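
str_enabled_disabled() and str_disabled_enabled() from <linux/string_choices.h> map a boolean to the matching string, replacing the open-coded ternaries:

	/* Equivalent to: (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled" */
	pr_info("MXTAL is %s\n", str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));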
diff --git a/drivers/clk/clk-npcm8xx.c b/drivers/clk/clk-npcm8xx.c
new file mode 100644
index 000000000000..2138c011411d
--- /dev/null
+++ b/drivers/clk/clk-npcm8xx.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM8xx Clock Generator
+ * All the clocks are initialized by the bootloader, so this driver allows only
+ * reading of current settings directly from the hardware.
+ *
+ * Copyright (C) 2020 Nuvoton Technologies
+ * Author: Tomer Maimon <tomer.maimon@nuvoton.com>
+ */
+
+#define pr_fmt(fmt) "npcm8xx_clk: " fmt
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/nuvoton,npcm845-clk.h>
+#include <soc/nuvoton/clock-npcm8xx.h>
+
+/* npcm8xx clock registers */
+#define NPCM8XX_CLKSEL 0x04
+#define NPCM8XX_CLKDIV1 0x08
+#define NPCM8XX_CLKDIV2 0x2C
+#define NPCM8XX_CLKDIV3 0x58
+#define NPCM8XX_CLKDIV4 0x7C
+#define NPCM8XX_PLLCON0 0x0C
+#define NPCM8XX_PLLCON1 0x10
+#define NPCM8XX_PLLCON2 0x54
+#define NPCM8XX_PLLCONG 0x60
+#define NPCM8XX_THRTL_CNT 0xC0
+
+#define PLLCON_LOKI BIT(31)
+#define PLLCON_LOKS BIT(30)
+#define PLLCON_FBDV GENMASK(27, 16)
+#define PLLCON_OTDV2 GENMASK(15, 13)
+#define PLLCON_PWDEN BIT(12)
+#define PLLCON_OTDV1 GENMASK(10, 8)
+#define PLLCON_INDV GENMASK(5, 0)
+
+static void __iomem *clk_base;
+
+struct npcm8xx_clk_pll {
+ void __iomem *pllcon;
+ unsigned int id;
+ const char *name;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+#define to_npcm8xx_clk_pll(_hw) container_of(_hw, struct npcm8xx_clk_pll, hw)
+
+struct npcm8xx_clk_pll_data {
+ const char *name;
+ struct clk_parent_data parent;
+ unsigned int reg;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+struct npcm8xx_clk_div_data {
+ u32 reg;
+ u8 shift;
+ u8 width;
+ const char *name;
+ const struct clk_hw *parent_hw;
+ unsigned long clk_divider_flags;
+ unsigned long flags;
+ int onecell_idx;
+ struct clk_hw hw;
+};
+
+struct npcm8xx_clk_mux_data {
+ u8 shift;
+ u32 mask;
+ const u32 *table;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+static struct clk_hw hw_pll1_div2, hw_pll2_div2, hw_gfx_div2, hw_pre_clk;
+static struct npcm8xx_clk_pll_data npcm8xx_pll_clks[] = {
+ { "pll0", { .index = 0 }, NPCM8XX_PLLCON0, 0 },
+ { "pll1", { .index = 0 }, NPCM8XX_PLLCON1, 0 },
+ { "pll2", { .index = 0 }, NPCM8XX_PLLCON2, 0 },
+ { "pll_gfx", { .index = 0 }, NPCM8XX_PLLCONG, 0 },
+};
+
+static const u32 cpuck_mux_table[] = { 0, 1, 2, 7 };
+static const struct clk_parent_data cpuck_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 pixcksel_mux_table[] = { 0, 2 };
+static const struct clk_parent_data pixcksel_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[3].hw },
+ { .index = 0 }
+};
+
+static const u32 default_mux_table[] = { 0, 1, 2, 3 };
+static const struct clk_parent_data default_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 sucksel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data sucksel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 mccksel_mux_table[] = { 0, 2 };
+static const struct clk_parent_data mccksel_mux_parents[] = {
+ { .hw = &hw_pll1_div2 },
+ { .index = 0 }
+};
+
+static const u32 clkoutsel_mux_table[] = { 0, 1, 2, 3, 4 };
+static const struct clk_parent_data clkoutsel_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &hw_gfx_div2 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 gfxmsel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data gfxmsel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 dvcssel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data dvcssel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 default3_mux_table[] = { 0, 1, 2 };
+static const struct clk_parent_data default3_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 }
+};
+
+static struct npcm8xx_clk_mux_data npcm8xx_muxes[] = {
+ { 0, 3, cpuck_mux_table, "cpu_mux", cpuck_mux_parents,
+ ARRAY_SIZE(cpuck_mux_parents), CLK_IS_CRITICAL },
+ { 4, 2, pixcksel_mux_table, "gfx_pixel_mux", pixcksel_mux_parents,
+ ARRAY_SIZE(pixcksel_mux_parents), 0 },
+ { 6, 2, default_mux_table, "sd_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 8, 2, default_mux_table, "uart_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 10, 2, sucksel_mux_table, "serial_usb_mux", sucksel_mux_parents,
+ ARRAY_SIZE(sucksel_mux_parents), 0 },
+ { 12, 2, mccksel_mux_table, "mc_mux", mccksel_mux_parents,
+ ARRAY_SIZE(mccksel_mux_parents), 0 },
+ { 14, 2, default_mux_table, "adc_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 16, 2, default_mux_table, "gfx_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 18, 3, clkoutsel_mux_table, "clkout_mux", clkoutsel_mux_parents,
+ ARRAY_SIZE(clkoutsel_mux_parents), 0 },
+ { 21, 2, gfxmsel_mux_table, "gfxm_mux", gfxmsel_mux_parents,
+ ARRAY_SIZE(gfxmsel_mux_parents), 0 },
+ { 23, 2, dvcssel_mux_table, "dvc_mux", dvcssel_mux_parents,
+ ARRAY_SIZE(dvcssel_mux_parents), 0 },
+ { 25, 2, default3_mux_table, "rg_mux", default3_mux_parents,
+ ARRAY_SIZE(default3_mux_parents), 0 },
+ { 27, 2, default3_mux_table, "rcp_mux", default3_mux_parents,
+ ARRAY_SIZE(default3_mux_parents), 0 },
+};
+
+/* configurable pre dividers: */
+static struct npcm8xx_clk_div_data npcm8xx_pre_divs[] = {
+ { NPCM8XX_CLKDIV1, 21, 5, "pre_adc", &npcm8xx_muxes[6].hw, CLK_DIVIDER_READ_ONLY, 0, -1 },
+ { NPCM8XX_CLKDIV1, 26, 2, "ahb", &hw_pre_clk, CLK_DIVIDER_READ_ONLY, CLK_IS_CRITICAL, NPCM8XX_CLK_AHB },
+};
+
+/* configurable dividers: */
+static struct npcm8xx_clk_div_data npcm8xx_divs[] = {
+ { NPCM8XX_CLKDIV1, 28, 3, "adc", &npcm8xx_pre_divs[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_ADC },
+ { NPCM8XX_CLKDIV1, 16, 5, "uart", &npcm8xx_muxes[3].hw, 0, 0, NPCM8XX_CLK_UART },
+ { NPCM8XX_CLKDIV1, 11, 5, "mmc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_MMC },
+ { NPCM8XX_CLKDIV1, 6, 5, "spi3", &npcm8xx_pre_divs[1].hw, 0, 0, NPCM8XX_CLK_SPI3 },
+ { NPCM8XX_CLKDIV1, 2, 4, "pci", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_PCI },
+
+ { NPCM8XX_CLKDIV2, 30, 2, "apb4", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB4 },
+ { NPCM8XX_CLKDIV2, 28, 2, "apb3", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB3 },
+ { NPCM8XX_CLKDIV2, 26, 2, "apb2", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB2 },
+ { NPCM8XX_CLKDIV2, 24, 2, "apb1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB1 },
+ { NPCM8XX_CLKDIV2, 22, 2, "apb5", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB5 },
+ { NPCM8XX_CLKDIV2, 16, 5, "clkout", &npcm8xx_muxes[8].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_CLKOUT },
+ { NPCM8XX_CLKDIV2, 13, 3, "gfx", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_GFX },
+ { NPCM8XX_CLKDIV2, 8, 5, "usb_bridge", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU },
+ { NPCM8XX_CLKDIV2, 4, 4, "usb_host", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU48 },
+ { NPCM8XX_CLKDIV2, 0, 4, "sdhc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SDHC },
+
+ { NPCM8XX_CLKDIV3, 16, 8, "spi1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI1 },
+ { NPCM8XX_CLKDIV3, 11, 5, "uart2", &npcm8xx_muxes[3].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_UART2 },
+ { NPCM8XX_CLKDIV3, 6, 5, "spi0", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI0 },
+ { NPCM8XX_CLKDIV3, 1, 5, "spix", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPIX },
+
+ { NPCM8XX_CLKDIV4, 28, 4, "rg", &npcm8xx_muxes[11].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RG },
+ { NPCM8XX_CLKDIV4, 12, 4, "rcp", &npcm8xx_muxes[12].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RCP },
+
+ { NPCM8XX_THRTL_CNT, 0, 2, "th", &npcm8xx_muxes[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_TH },
+};
+
+static unsigned long npcm8xx_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct npcm8xx_clk_pll *pll = to_npcm8xx_clk_pll(hw);
+ unsigned long fbdv, indv, otdv1, otdv2;
+ unsigned int val;
+ u64 ret;
+
+ if (parent_rate == 0) {
+ pr_debug("%s: parent rate is zero\n", __func__);
+ return 0;
+ }
+
+ val = readl_relaxed(pll->pllcon);
+
+ indv = FIELD_GET(PLLCON_INDV, val);
+ fbdv = FIELD_GET(PLLCON_FBDV, val);
+ otdv1 = FIELD_GET(PLLCON_OTDV1, val);
+ otdv2 = FIELD_GET(PLLCON_OTDV2, val);
+
+ ret = (u64)parent_rate * fbdv;
+ do_div(ret, indv * otdv1 * otdv2);
+
+ return ret;
+}
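
The recalc above implements rate = parent_rate * FBDV / (INDV * OTDV1 * OTDV2). Worked numbers (assumed, for illustration): a 25 MHz reference with FBDV = 80, INDV = 1, OTDV1 = 2 and OTDV2 = 1 yields 25 MHz * 80 / 2 = 1000 MHz.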
+
+static const struct clk_ops npcm8xx_clk_pll_ops = {
+ .recalc_rate = npcm8xx_clk_pll_recalc_rate,
+};
+
+static struct clk_hw *
+npcm8xx_clk_register_pll(struct device *dev, void __iomem *pllcon,
+ const char *name, const struct clk_parent_data *parent,
+ unsigned long flags)
+{
+ struct npcm8xx_clk_pll *pll;
+ struct clk_init_data init = {};
+ int ret;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &npcm8xx_clk_pll_ops;
+ init.parent_data = parent;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll->pllcon = pllcon;
+ pll->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &pll->hw;
+}
+
+static DEFINE_SPINLOCK(npcm8xx_clk_lock);
+
+static int npcm8xx_clk_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct npcm_clock_adev *rdev = to_npcm_clock_adev(adev);
+ struct clk_hw_onecell_data *npcm8xx_clk_data;
+ struct device *dev = &adev->dev;
+ struct clk_hw *hw;
+ unsigned int i;
+
+ npcm8xx_clk_data = devm_kzalloc(dev, struct_size(npcm8xx_clk_data, hws,
+ NPCM8XX_NUM_CLOCKS),
+ GFP_KERNEL);
+ if (!npcm8xx_clk_data)
+ return -ENOMEM;
+
+ clk_base = rdev->base;
+
+ npcm8xx_clk_data->num = NPCM8XX_NUM_CLOCKS;
+
+ for (i = 0; i < NPCM8XX_NUM_CLOCKS; i++)
+ npcm8xx_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ /* Register plls */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_pll_clks); i++) {
+ struct npcm8xx_clk_pll_data *pll_clk = &npcm8xx_pll_clks[i];
+
+ hw = npcm8xx_clk_register_pll(dev, clk_base + pll_clk->reg,
+ pll_clk->name, &pll_clk->parent,
+ pll_clk->flags);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll\n");
+ pll_clk->hw = *hw;
+ }
+
+ /* Register fixed dividers */
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll1_div2", "pll1", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register fixed div\n");
+ hw_pll1_div2 = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll2_div2", "pll2", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll2 div2\n");
+ hw_pll2_div2 = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll_gfx_div2", "pll_gfx", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register gfx div2\n");
+ hw_gfx_div2 = *hw;
+
+ /* Register muxes */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_muxes); i++) {
+ struct npcm8xx_clk_mux_data *mux_data = &npcm8xx_muxes[i];
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ mux_data->name,
+ mux_data->parent_data,
+ mux_data->num_parents,
+ mux_data->flags,
+ clk_base + NPCM8XX_CLKSEL,
+ mux_data->shift,
+ mux_data->mask,
+ 0,
+ mux_data->table,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register mux\n");
+ mux_data->hw = *hw;
+ }
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pre_clk", "cpu_mux", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre clk div2\n");
+ hw_pre_clk = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "axi", "th", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register axi div2\n");
+ npcm8xx_clk_data->hws[NPCM8XX_CLK_AXI] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "atb", "axi", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register atb div2\n");
+ npcm8xx_clk_data->hws[NPCM8XX_CLK_ATB] = hw;
+
+ /* Register pre dividers */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_pre_divs); i++) {
+ struct npcm8xx_clk_div_data *div_data = &npcm8xx_pre_divs[i];
+
+ hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name,
+ div_data->parent_hw,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift,
+ div_data->width,
+ div_data->clk_divider_flags,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre div\n");
+ div_data->hw = *hw;
+
+ if (div_data->onecell_idx >= 0)
+ npcm8xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ /* Register dividers */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_divs); i++) {
+ struct npcm8xx_clk_div_data *div_data = &npcm8xx_divs[i];
+
+ hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name,
+ div_data->parent_hw,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift,
+ div_data->width,
+ div_data->clk_divider_flags,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register div\n");
+
+ if (div_data->onecell_idx >= 0)
+ npcm8xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ npcm8xx_clk_data);
+}
+
+static const struct auxiliary_device_id npcm8xx_clock_ids[] = {
+ {
+ .name = "reset_npcm.clk-npcm8xx",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, npcm8xx_clock_ids);
+
+static struct auxiliary_driver npcm8xx_clock_driver = {
+ .probe = npcm8xx_clk_probe,
+ .id_table = npcm8xx_clock_ids,
+};
+module_auxiliary_driver(npcm8xx_clock_driver);
+
+MODULE_DESCRIPTION("Clock driver for Nuvoton NPCM8XX BMC SoC");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
+
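
Note the placeholder choice in this probe: every hws[] slot starts as ERR_PTR(-EPROBE_DEFER), so a consumer that races ahead of registration sees a deferral it can retry, whereas the loongson2 change earlier poisons permanently absent IDs with -ENOENT. The consumer-visible difference (a sketch; of_clk_hw_onecell_get() hands back hws[idx] directly):

	clk = devm_clk_get(dev, "adc");
	/* -EPROBE_DEFER -> driver core retries the consumer probe later */
	/* -ENOENT       -> treated as a hard "no such clock" error     */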
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 5efb10776ae5..39049f62dbbb 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -281,7 +281,7 @@ static struct platform_driver palmas_clks_driver = {
.of_match_table = palmas_clks_of_match,
},
.probe = palmas_clks_probe,
- .remove_new = palmas_clks_remove,
+ .remove = palmas_clks_remove,
};
module_platform_driver(palmas_clks_driver);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 3dd2b83d0404..4709f0338e37 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -14,6 +14,7 @@
struct clk_pwm {
struct clk_hw hw;
struct pwm_device *pwm;
+ struct pwm_state state;
u32 fixed_rate;
};
@@ -22,11 +23,28 @@ static inline struct clk_pwm *to_clk_pwm(struct clk_hw *hw)
return container_of(hw, struct clk_pwm, hw);
}
+static int clk_pwm_enable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ return pwm_apply_atomic(clk_pwm->pwm, &clk_pwm->state);
+}
+
+static void clk_pwm_disable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+ struct pwm_state state = clk_pwm->state;
+
+ state.enabled = false;
+
+ pwm_apply_atomic(clk_pwm->pwm, &state);
+}
+
static int clk_pwm_prepare(struct clk_hw *hw)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
- return pwm_enable(clk_pwm->pwm);
+ return pwm_apply_might_sleep(clk_pwm->pwm, &clk_pwm->state);
}
static void clk_pwm_unprepare(struct clk_hw *hw)
@@ -48,8 +66,11 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
struct pwm_state state;
+ int ret;
- pwm_get_state(clk_pwm->pwm, &state);
+ ret = pwm_get_state_hw(clk_pwm->pwm, &state);
+ if (ret)
+ return ret;
duty->num = state.duty_cycle;
duty->den = state.period;
@@ -57,6 +78,13 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
return 0;
}
+static const struct clk_ops clk_pwm_ops_atomic = {
+ .enable = clk_pwm_enable,
+ .disable = clk_pwm_disable,
+ .recalc_rate = clk_pwm_recalc_rate,
+ .get_duty_cycle = clk_pwm_get_duty_cycle,
+};
+
static const struct clk_ops clk_pwm_ops = {
.prepare = clk_pwm_prepare,
.unprepare = clk_pwm_unprepare,
@@ -103,20 +131,19 @@ static int clk_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
- if (ret < 0)
- return ret;
+ pwm_init_state(pwm, &clk_pwm->state);
+ pwm_set_relative_duty_cycle(&clk_pwm->state, 1, 2);
+ clk_pwm->state.enabled = true;
clk_name = node->name;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
- init.ops = &clk_pwm_ops;
+ if (pwm_might_sleep(pwm))
+ init.ops = &clk_pwm_ops;
+ else
+ init.ops = &clk_pwm_ops_atomic;
+
init.flags = 0;
init.num_parents = 0;
@@ -142,7 +169,7 @@ MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids);
static struct platform_driver clk_pwm_driver = {
.probe = clk_pwm_probe,
- .remove_new = clk_pwm_remove,
+ .remove = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
.of_match_table = clk_pwm_dt_ids,
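
The clk-pwm rework selects between two ops tables at probe time: if pwm_might_sleep() says the PWM must not be driven from atomic context, the clock keeps prepare/unprepare (backed by pwm_apply_might_sleep()); otherwise it can expose enable/disable backed by pwm_apply_atomic(). Condensed selection logic, as in the hunk above:

	/*
	 * Atomic-capable PWMs can back clk enable/disable, which may be
	 * called with spinlocks held; sleeping PWMs must stay on
	 * prepare/unprepare.
	 */
	init.ops = pwm_might_sleep(pwm) ? &clk_pwm_ops : &clk_pwm_ops_atomic;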
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 4dcde305944c..a560edeb4b55 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -1065,11 +1066,8 @@ static void __init _clockgen_init(struct device_node *np, bool legacy);
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node) {
- struct device_node *parent_np;
-
- parent_np = of_get_parent(np);
+ struct device_node *parent_np __free(device_node) = of_get_parent(np);
_clockgen_init(parent_np, true);
- of_node_put(parent_np);
}
}
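
__free(device_node) attaches the of_node_put() cleanup from <linux/cleanup.h> to the variable's scope, so the reference is released on every exit path without an explicit put. Minimal pattern (sketch):

	struct device_node *parent __free(device_node) = of_get_parent(np);

	if (!parent)
		return;
	/* use parent; of_node_put() runs automatically at scope exit */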
diff --git a/drivers/clk/clk-rp1.c b/drivers/clk/clk-rp1.c
new file mode 100644
index 000000000000..fd144755b879
--- /dev/null
+++ b/drivers/clk/clk-rp1.c
@@ -0,0 +1,2462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd.
+ *
+ * Clock driver for RP1 PCIe multifunction chip.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <dt-bindings/clock/raspberrypi,rp1-clocks.h>
+
+#define PLL_SYS_OFFSET 0x08000
+#define PLL_SYS_CS (PLL_SYS_OFFSET + 0x00)
+#define PLL_SYS_PWR (PLL_SYS_OFFSET + 0x04)
+#define PLL_SYS_FBDIV_INT (PLL_SYS_OFFSET + 0x08)
+#define PLL_SYS_FBDIV_FRAC (PLL_SYS_OFFSET + 0x0c)
+#define PLL_SYS_PRIM (PLL_SYS_OFFSET + 0x10)
+#define PLL_SYS_SEC (PLL_SYS_OFFSET + 0x14)
+
+#define PLL_AUDIO_OFFSET 0x0c000
+#define PLL_AUDIO_CS (PLL_AUDIO_OFFSET + 0x00)
+#define PLL_AUDIO_PWR (PLL_AUDIO_OFFSET + 0x04)
+#define PLL_AUDIO_FBDIV_INT (PLL_AUDIO_OFFSET + 0x08)
+#define PLL_AUDIO_FBDIV_FRAC (PLL_AUDIO_OFFSET + 0x0c)
+#define PLL_AUDIO_PRIM (PLL_AUDIO_OFFSET + 0x10)
+#define PLL_AUDIO_SEC (PLL_AUDIO_OFFSET + 0x14)
+#define PLL_AUDIO_TERN (PLL_AUDIO_OFFSET + 0x18)
+
+#define PLL_VIDEO_OFFSET 0x10000
+#define PLL_VIDEO_CS (PLL_VIDEO_OFFSET + 0x00)
+#define PLL_VIDEO_PWR (PLL_VIDEO_OFFSET + 0x04)
+#define PLL_VIDEO_FBDIV_INT (PLL_VIDEO_OFFSET + 0x08)
+#define PLL_VIDEO_FBDIV_FRAC (PLL_VIDEO_OFFSET + 0x0c)
+#define PLL_VIDEO_PRIM (PLL_VIDEO_OFFSET + 0x10)
+#define PLL_VIDEO_SEC (PLL_VIDEO_OFFSET + 0x14)
+
+#define GPCLK_OE_CTRL 0x00000
+
+#define CLK_SYS_OFFSET 0x00014
+#define CLK_SYS_CTRL (CLK_SYS_OFFSET + 0x00)
+#define CLK_SYS_DIV_INT (CLK_SYS_OFFSET + 0x04)
+#define CLK_SYS_SEL (CLK_SYS_OFFSET + 0x0c)
+
+#define CLK_SLOW_OFFSET 0x00024
+#define CLK_SLOW_SYS_CTRL (CLK_SLOW_OFFSET + 0x00)
+#define CLK_SLOW_SYS_DIV_INT (CLK_SLOW_OFFSET + 0x04)
+#define CLK_SLOW_SYS_SEL (CLK_SLOW_OFFSET + 0x0c)
+
+#define CLK_DMA_OFFSET 0x00044
+#define CLK_DMA_CTRL (CLK_DMA_OFFSET + 0x00)
+#define CLK_DMA_DIV_INT (CLK_DMA_OFFSET + 0x04)
+#define CLK_DMA_SEL (CLK_DMA_OFFSET + 0x0c)
+
+#define CLK_UART_OFFSET 0x00054
+#define CLK_UART_CTRL (CLK_UART_OFFSET + 0x00)
+#define CLK_UART_DIV_INT (CLK_UART_OFFSET + 0x04)
+#define CLK_UART_SEL (CLK_UART_OFFSET + 0x0c)
+
+#define CLK_ETH_OFFSET 0x00064
+#define CLK_ETH_CTRL (CLK_ETH_OFFSET + 0x00)
+#define CLK_ETH_DIV_INT (CLK_ETH_OFFSET + 0x04)
+#define CLK_ETH_SEL (CLK_ETH_OFFSET + 0x0c)
+
+#define CLK_PWM0_OFFSET 0x00074
+#define CLK_PWM0_CTRL (CLK_PWM0_OFFSET + 0x00)
+#define CLK_PWM0_DIV_INT (CLK_PWM0_OFFSET + 0x04)
+#define CLK_PWM0_DIV_FRAC (CLK_PWM0_OFFSET + 0x08)
+#define CLK_PWM0_SEL (CLK_PWM0_OFFSET + 0x0c)
+
+#define CLK_PWM1_OFFSET 0x00084
+#define CLK_PWM1_CTRL (CLK_PWM1_OFFSET + 0x00)
+#define CLK_PWM1_DIV_INT (CLK_PWM1_OFFSET + 0x04)
+#define CLK_PWM1_DIV_FRAC (CLK_PWM1_OFFSET + 0x08)
+#define CLK_PWM1_SEL (CLK_PWM1_OFFSET + 0x0c)
+
+#define CLK_AUDIO_IN_OFFSET 0x00094
+#define CLK_AUDIO_IN_CTRL (CLK_AUDIO_IN_OFFSET + 0x00)
+#define CLK_AUDIO_IN_DIV_INT (CLK_AUDIO_IN_OFFSET + 0x04)
+#define CLK_AUDIO_IN_SEL (CLK_AUDIO_IN_OFFSET + 0x0c)
+
+#define CLK_AUDIO_OUT_OFFSET 0x000a4
+#define CLK_AUDIO_OUT_CTRL (CLK_AUDIO_OUT_OFFSET + 0x00)
+#define CLK_AUDIO_OUT_DIV_INT (CLK_AUDIO_OUT_OFFSET + 0x04)
+#define CLK_AUDIO_OUT_SEL (CLK_AUDIO_OUT_OFFSET + 0x0c)
+
+#define CLK_I2S_OFFSET 0x000b4
+#define CLK_I2S_CTRL (CLK_I2S_OFFSET + 0x00)
+#define CLK_I2S_DIV_INT (CLK_I2S_OFFSET + 0x04)
+#define CLK_I2S_SEL (CLK_I2S_OFFSET + 0x0c)
+
+#define CLK_MIPI0_CFG_OFFSET 0x000c4
+#define CLK_MIPI0_CFG_CTRL (CLK_MIPI0_CFG_OFFSET + 0x00)
+#define CLK_MIPI0_CFG_DIV_INT (CLK_MIPI0_CFG_OFFSET + 0x04)
+#define CLK_MIPI0_CFG_SEL (CLK_MIPI0_CFG_OFFSET + 0x0c)
+
+#define CLK_MIPI1_CFG_OFFSET 0x000d4
+#define CLK_MIPI1_CFG_CTRL (CLK_MIPI1_CFG_OFFSET + 0x00)
+#define CLK_MIPI1_CFG_DIV_INT (CLK_MIPI1_CFG_OFFSET + 0x04)
+#define CLK_MIPI1_CFG_SEL (CLK_MIPI1_CFG_OFFSET + 0x0c)
+
+#define CLK_PCIE_AUX_OFFSET 0x000e4
+#define CLK_PCIE_AUX_CTRL (CLK_PCIE_AUX_OFFSET + 0x00)
+#define CLK_PCIE_AUX_DIV_INT (CLK_PCIE_AUX_OFFSET + 0x04)
+#define CLK_PCIE_AUX_SEL (CLK_PCIE_AUX_OFFSET + 0x0c)
+
+#define CLK_USBH0_MICROFRAME_OFFSET 0x000f4
+#define CLK_USBH0_MICROFRAME_CTRL (CLK_USBH0_MICROFRAME_OFFSET + 0x00)
+#define CLK_USBH0_MICROFRAME_DIV_INT (CLK_USBH0_MICROFRAME_OFFSET + 0x04)
+#define CLK_USBH0_MICROFRAME_SEL (CLK_USBH0_MICROFRAME_OFFSET + 0x0c)
+
+#define CLK_USBH1_MICROFRAME_OFFSET 0x00104
+#define CLK_USBH1_MICROFRAME_CTRL (CLK_USBH1_MICROFRAME_OFFSET + 0x00)
+#define CLK_USBH1_MICROFRAME_DIV_INT (CLK_USBH1_MICROFRAME_OFFSET + 0x04)
+#define CLK_USBH1_MICROFRAME_SEL (CLK_USBH1_MICROFRAME_OFFSET + 0x0c)
+
+#define CLK_USBH0_SUSPEND_OFFSET 0x00114
+#define CLK_USBH0_SUSPEND_CTRL (CLK_USBH0_SUSPEND_OFFSET + 0x00)
+#define CLK_USBH0_SUSPEND_DIV_INT (CLK_USBH0_SUSPEND_OFFSET + 0x04)
+#define CLK_USBH0_SUSPEND_SEL (CLK_USBH0_SUSPEND_OFFSET + 0x0c)
+
+#define CLK_USBH1_SUSPEND_OFFSET 0x00124
+#define CLK_USBH1_SUSPEND_CTRL (CLK_USBH1_SUSPEND_OFFSET + 0x00)
+#define CLK_USBH1_SUSPEND_DIV_INT (CLK_USBH1_SUSPEND_OFFSET + 0x04)
+#define CLK_USBH1_SUSPEND_SEL (CLK_USBH1_SUSPEND_OFFSET + 0x0c)
+
+#define CLK_ETH_TSU_OFFSET 0x00134
+#define CLK_ETH_TSU_CTRL (CLK_ETH_TSU_OFFSET + 0x00)
+#define CLK_ETH_TSU_DIV_INT (CLK_ETH_TSU_OFFSET + 0x04)
+#define CLK_ETH_TSU_SEL (CLK_ETH_TSU_OFFSET + 0x0c)
+
+#define CLK_ADC_OFFSET 0x00144
+#define CLK_ADC_CTRL (CLK_ADC_OFFSET + 0x00)
+#define CLK_ADC_DIV_INT (CLK_ADC_OFFSET + 0x04)
+#define CLK_ADC_SEL (CLK_ADC_OFFSET + 0x0c)
+
+#define CLK_SDIO_TIMER_OFFSET 0x00154
+#define CLK_SDIO_TIMER_CTRL (CLK_SDIO_TIMER_OFFSET + 0x00)
+#define CLK_SDIO_TIMER_DIV_INT (CLK_SDIO_TIMER_OFFSET + 0x04)
+#define CLK_SDIO_TIMER_SEL (CLK_SDIO_TIMER_OFFSET + 0x0c)
+
+#define CLK_SDIO_ALT_SRC_OFFSET 0x00164
+#define CLK_SDIO_ALT_SRC_CTRL (CLK_SDIO_ALT_SRC_OFFSET + 0x00)
+#define CLK_SDIO_ALT_SRC_DIV_INT (CLK_SDIO_ALT_SRC_OFFSET + 0x04)
+#define CLK_SDIO_ALT_SRC_SEL (CLK_SDIO_ALT_SRC_OFFSET + 0x0c)
+
+#define CLK_GP0_OFFSET 0x00174
+#define CLK_GP0_CTRL (CLK_GP0_OFFSET + 0x00)
+#define CLK_GP0_DIV_INT (CLK_GP0_OFFSET + 0x04)
+#define CLK_GP0_DIV_FRAC (CLK_GP0_OFFSET + 0x08)
+#define CLK_GP0_SEL (CLK_GP0_OFFSET + 0x0c)
+
+#define CLK_GP1_OFFSET 0x00184
+#define CLK_GP1_CTRL (CLK_GP1_OFFSET + 0x00)
+#define CLK_GP1_DIV_INT (CLK_GP1_OFFSET + 0x04)
+#define CLK_GP1_DIV_FRAC (CLK_GP1_OFFSET + 0x08)
+#define CLK_GP1_SEL (CLK_GP1_OFFSET + 0x0c)
+
+#define CLK_GP2_OFFSET 0x00194
+#define CLK_GP2_CTRL (CLK_GP2_OFFSET + 0x00)
+#define CLK_GP2_DIV_INT (CLK_GP2_OFFSET + 0x04)
+#define CLK_GP2_DIV_FRAC (CLK_GP2_OFFSET + 0x08)
+#define CLK_GP2_SEL (CLK_GP2_OFFSET + 0x0c)
+
+#define CLK_GP3_OFFSET 0x001a4
+#define CLK_GP3_CTRL (CLK_GP3_OFFSET + 0x00)
+#define CLK_GP3_DIV_INT (CLK_GP3_OFFSET + 0x04)
+#define CLK_GP3_DIV_FRAC (CLK_GP3_OFFSET + 0x08)
+#define CLK_GP3_SEL (CLK_GP3_OFFSET + 0x0c)
+
+#define CLK_GP4_OFFSET 0x001b4
+#define CLK_GP4_CTRL (CLK_GP4_OFFSET + 0x00)
+#define CLK_GP4_DIV_INT (CLK_GP4_OFFSET + 0x04)
+#define CLK_GP4_DIV_FRAC (CLK_GP4_OFFSET + 0x08)
+#define CLK_GP4_SEL (CLK_GP4_OFFSET + 0x0c)
+
+#define CLK_GP5_OFFSET 0x001c4
+#define CLK_GP5_CTRL (CLK_GP5_OFFSET + 0x00)
+#define CLK_GP5_DIV_INT (CLK_GP5_OFFSET + 0x04)
+#define CLK_GP5_DIV_FRAC (CLK_GP5_OFFSET + 0x08)
+#define CLK_GP5_SEL (CLK_GP5_OFFSET + 0x0c)
+
+#define CLK_SYS_RESUS_CTRL 0x0020c
+
+#define CLK_SLOW_SYS_RESUS_CTRL 0x00214
+
+#define FC0_OFFSET 0x0021c
+#define FC0_REF_KHZ (FC0_OFFSET + 0x00)
+#define FC0_MIN_KHZ (FC0_OFFSET + 0x04)
+#define FC0_MAX_KHZ (FC0_OFFSET + 0x08)
+#define FC0_DELAY (FC0_OFFSET + 0x0c)
+#define FC0_INTERVAL (FC0_OFFSET + 0x10)
+#define FC0_SRC (FC0_OFFSET + 0x14)
+#define FC0_STATUS (FC0_OFFSET + 0x18)
+#define FC0_RESULT (FC0_OFFSET + 0x1c)
+#define FC_SIZE 0x20
+#define FC_COUNT 8
+#define FC_NUM(idx, off) ((idx) * 32 + (off))
+
+#define AUX_SEL 1
+
+#define VIDEO_CLOCKS_OFFSET 0x4000
+#define VIDEO_CLK_VEC_CTRL (VIDEO_CLOCKS_OFFSET + 0x0000)
+#define VIDEO_CLK_VEC_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0004)
+#define VIDEO_CLK_VEC_SEL (VIDEO_CLOCKS_OFFSET + 0x000c)
+#define VIDEO_CLK_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0010)
+#define VIDEO_CLK_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0014)
+#define VIDEO_CLK_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x001c)
+#define VIDEO_CLK_MIPI0_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0020)
+#define VIDEO_CLK_MIPI0_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0024)
+#define VIDEO_CLK_MIPI0_DPI_DIV_FRAC (VIDEO_CLOCKS_OFFSET + 0x0028)
+#define VIDEO_CLK_MIPI0_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x002c)
+#define VIDEO_CLK_MIPI1_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0030)
+#define VIDEO_CLK_MIPI1_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0034)
+#define VIDEO_CLK_MIPI1_DPI_DIV_FRAC (VIDEO_CLOCKS_OFFSET + 0x0038)
+#define VIDEO_CLK_MIPI1_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x003c)
+
+#define DIV_INT_8BIT_MAX GENMASK(7, 0) /* max divide for most clocks */
+#define DIV_INT_16BIT_MAX GENMASK(15, 0) /* max divide for GPx, PWM */
+#define DIV_INT_24BIT_MAX GENMASK(23, 0) /* max divide for CLK_SYS */
+
+#define FC0_STATUS_DONE BIT(4)
+#define FC0_STATUS_RUNNING BIT(8)
+#define FC0_RESULT_FRAC_SHIFT 5
+
+#define PLL_PRIM_DIV1_MASK GENMASK(18, 16)
+#define PLL_PRIM_DIV2_MASK GENMASK(14, 12)
+
+#define PLL_SEC_DIV_MASK GENMASK(12, 8)
+
+#define PLL_CS_LOCK BIT(31)
+#define PLL_CS_REFDIV_MASK BIT(1)
+
+#define PLL_PWR_PD BIT(0)
+#define PLL_PWR_DACPD BIT(1)
+#define PLL_PWR_DSMPD BIT(2)
+#define PLL_PWR_POSTDIVPD BIT(3)
+#define PLL_PWR_4PHASEPD BIT(4)
+#define PLL_PWR_VCOPD BIT(5)
+#define PLL_PWR_MASK GENMASK(5, 0)
+
+#define PLL_SEC_RST BIT(16)
+#define PLL_SEC_IMPL BIT(31)
+
+/* PLL phase output for both PRI and SEC */
+#define PLL_PH_EN BIT(4)
+#define PLL_PH_PHASE_SHIFT 0
+
+#define RP1_PLL_PHASE_0 0
+#define RP1_PLL_PHASE_90 1
+#define RP1_PLL_PHASE_180 2
+#define RP1_PLL_PHASE_270 3
+
+/* Clock fields for all clocks */
+#define CLK_CTRL_ENABLE BIT(11)
+#define CLK_CTRL_AUXSRC_MASK GENMASK(9, 5)
+#define CLK_CTRL_SRC_SHIFT 0
+#define CLK_DIV_FRAC_BITS 16
+
+#define LOCK_TIMEOUT_US 100000
+#define LOCK_POLL_DELAY_US 5
+
+#define MAX_CLK_PARENTS 16
+
+#define PLL_DIV_INVALID 19
+/*
+ * Secondary PLL channel output divider table.
+ * Valid divider values range from 8 to 18;
+ * the table value 19 (PLL_DIV_INVALID) marks
+ * register settings that must not be used.
+ */
+static const struct clk_div_table pll_sec_div_table[] = {
+ { 0x00, PLL_DIV_INVALID },
+ { 0x01, PLL_DIV_INVALID },
+ { 0x02, PLL_DIV_INVALID },
+ { 0x03, PLL_DIV_INVALID },
+ { 0x04, PLL_DIV_INVALID },
+ { 0x05, PLL_DIV_INVALID },
+ { 0x06, PLL_DIV_INVALID },
+ { 0x07, PLL_DIV_INVALID },
+ { 0x08, 8 },
+ { 0x09, 9 },
+ { 0x0a, 10 },
+ { 0x0b, 11 },
+ { 0x0c, 12 },
+ { 0x0d, 13 },
+ { 0x0e, 14 },
+ { 0x0f, 15 },
+ { 0x10, 16 },
+ { 0x11, 17 },
+ { 0x12, 18 },
+ { 0x13, PLL_DIV_INVALID },
+ { 0x14, PLL_DIV_INVALID },
+ { 0x15, PLL_DIV_INVALID },
+ { 0x16, PLL_DIV_INVALID },
+ { 0x17, PLL_DIV_INVALID },
+ { 0x18, PLL_DIV_INVALID },
+ { 0x19, PLL_DIV_INVALID },
+ { 0x1a, PLL_DIV_INVALID },
+ { 0x1b, PLL_DIV_INVALID },
+ { 0x1c, PLL_DIV_INVALID },
+ { 0x1d, PLL_DIV_INVALID },
+ { 0x1e, PLL_DIV_INVALID },
+ { 0x1f, PLL_DIV_INVALID },
+ { 0 }
+};
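
Each clk_div_table entry pairs a register value with the divider it selects; the generic divider code stops at the first entry whose .div is zero, so the trailing { 0 } terminates the table, while the PLL_DIV_INVALID entries keep the framework from ever picking the out-of-range register codes. For reference:

	/* From <linux/clk-provider.h>: */
	struct clk_div_table {
		unsigned int	val;
		unsigned int	div;
	};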
+
+struct rp1_clockman {
+ struct device *dev;
+ void __iomem *regs;
+ struct regmap *regmap;
+ spinlock_t regs_lock; /* spinlock for all clocks */
+
+ /* Must be last */
+ struct clk_hw_onecell_data onecell;
+};
+
+struct rp1_pll_core_data {
+ u32 cs_reg;
+ u32 pwr_reg;
+ u32 fbdiv_int_reg;
+ u32 fbdiv_frac_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_data {
+ u32 ctrl_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_ph_data {
+ unsigned int phase;
+ unsigned int fixed_divider;
+ u32 ph_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_divider_data {
+ u32 sec_reg;
+ u32 fc0_src;
+};
+
+struct rp1_clock_data {
+ int num_std_parents;
+ int num_aux_parents;
+ u32 oe_mask;
+ u32 clk_src_mask;
+ u32 ctrl_reg;
+ u32 div_int_reg;
+ u32 div_frac_reg;
+ u32 sel_reg;
+ u32 div_int_max;
+ unsigned long max_freq;
+ u32 fc0_src;
+};
+
+struct rp1_clk_desc {
+ struct clk_hw *(*clk_register)(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc);
+ const void *data;
+ struct clk_hw hw;
+ struct rp1_clockman *clockman;
+ unsigned long cached_rate;
+ struct clk_divider div;
+};
+
+static struct rp1_clk_desc *clk_audio_core;
+static struct rp1_clk_desc *clk_audio;
+static struct rp1_clk_desc *clk_i2s;
+static struct clk_hw *clk_xosc;
+
+static inline
+void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val)
+{
+ regmap_write(clockman->regmap, reg, val);
+}
+
+static inline u32 clockman_read(struct rp1_clockman *clockman, u32 reg)
+{
+ u32 val;
+
+ regmap_read(clockman->regmap, reg, &val);
+
+ return val;
+}
+
+static int rp1_pll_core_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 pwr = clockman_read(clockman, data->pwr_reg);
+
+ return (pwr & PLL_PWR_PD) || (pwr & PLL_PWR_POSTDIVPD);
+}
+
+static int rp1_pll_core_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_frac, val;
+ int ret;
+
+ spin_lock(&clockman->regs_lock);
+
+ if (!(clockman_read(clockman, data->cs_reg) & PLL_CS_LOCK)) {
+ /* Reset to a known state. */
+ clockman_write(clockman, data->pwr_reg, PLL_PWR_MASK);
+ clockman_write(clockman, data->fbdiv_int_reg, 20);
+ clockman_write(clockman, data->fbdiv_frac_reg, 0);
+ clockman_write(clockman, data->cs_reg, PLL_CS_REFDIV_MASK);
+ }
+
+ /* Come out of reset. */
+ fbdiv_frac = clockman_read(clockman, data->fbdiv_frac_reg);
+ clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
+ spin_unlock(&clockman->regs_lock);
+
+ /* Wait for the PLL to lock. */
+ ret = regmap_read_poll_timeout(clockman->regmap, data->cs_reg, val,
+ val & PLL_CS_LOCK,
+ LOCK_POLL_DELAY_US, LOCK_TIMEOUT_US);
+ if (ret)
+ dev_err(clockman->dev, "%s: can't lock PLL\n",
+ clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void rp1_pll_core_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->pwr_reg, 0);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static inline unsigned long get_pll_core_divider(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u32 *div_int, u32 *div_frac)
+{
+ u32 fbdiv_int, fbdiv_frac;
+ unsigned long calc_rate;
+ u64 shifted_fbdiv_int;
+ u64 div_fp64; /* 32.32 fixed point fraction. */
+
+ /* Factor of reference clock to VCO frequency. */
+ div_fp64 = (u64)(rate) << 32;
+ div_fp64 = DIV_ROUND_CLOSEST_ULL(div_fp64, parent_rate);
+
+ /* Round the fractional component at 24 bits. */
+ div_fp64 += 1 << (32 - 24 - 1);
+
+ fbdiv_int = div_fp64 >> 32;
+ fbdiv_frac = (div_fp64 >> (32 - 24)) & 0xffffff;
+
+ shifted_fbdiv_int = (u64)fbdiv_int << 24;
+ calc_rate = (u64)parent_rate * (shifted_fbdiv_int + fbdiv_frac);
+ calc_rate += BIT(23);
+ calc_rate >>= 24;
+
+ *div_int = fbdiv_int;
+ *div_frac = fbdiv_frac;
+
+ return calc_rate;
+}
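
The helper above builds the feedback divider in 32.32 fixed point, then rounds to the 24 fractional bits the hardware holds. Worked numbers (assumed): rate = 1,000,000,000 Hz from parent_rate = 50,000,000 Hz gives div_fp64 = 20.0, so fbdiv_int = 20 and fbdiv_frac = 0; a target of 1,001,000,000 Hz gives 20.02, i.e. fbdiv_frac of roughly 0.02 * 2^24, about 335544, and calc_rate reports the actually achievable rate back to the caller.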
+
+static int rp1_pll_core_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_int, fbdiv_frac;
+
+ /* Disable dividers to start with. */
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->fbdiv_int_reg, 0);
+ clockman_write(clockman, data->fbdiv_frac_reg, 0);
+ spin_unlock(&clockman->regs_lock);
+
+ get_pll_core_divider(hw, rate, parent_rate,
+ &fbdiv_int, &fbdiv_frac);
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
+ clockman_write(clockman, data->fbdiv_int_reg, fbdiv_int);
+ clockman_write(clockman, data->fbdiv_frac_reg, fbdiv_frac);
+ spin_unlock(&clockman->regs_lock);
+
+ /* Check that reference frequency is no greater than VCO / 16. */
+ if (WARN_ON_ONCE(parent_rate > (rate / 16)))
+ return -ERANGE;
+
+ spin_lock(&clockman->regs_lock);
+ /* Don't need to divide ref unless parent_rate > (output freq / 16) */
+ clockman_write(clockman, data->cs_reg,
+ clockman_read(clockman, data->cs_reg) |
+ PLL_CS_REFDIV_MASK);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_core_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_int, fbdiv_frac;
+ unsigned long calc_rate;
+ u64 shifted_fbdiv_int;
+
+ fbdiv_int = clockman_read(clockman, data->fbdiv_int_reg);
+ fbdiv_frac = clockman_read(clockman, data->fbdiv_frac_reg);
+
+ shifted_fbdiv_int = (u64)fbdiv_int << 24;
+ calc_rate = (u64)parent_rate * (shifted_fbdiv_int + fbdiv_frac);
+ calc_rate += BIT(23);
+ calc_rate >>= 24;
+
+ return calc_rate;
+}
+
+static int rp1_pll_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ u32 fbdiv_int, fbdiv_frac;
+
+ req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate,
+ &fbdiv_int,
+ &fbdiv_frac);
+
+ return 0;
+}
+
+static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate,
+ u32 *divider1, u32 *divider2)
+{
+ unsigned int div1, div2;
+ unsigned int best_div1 = 7, best_div2 = 7;
+ unsigned long best_rate_diff =
+ abs_diff(DIV_ROUND_CLOSEST(parent_rate, best_div1 * best_div2), rate);
+ unsigned long rate_diff, calc_rate;
+
+ for (div1 = 1; div1 <= 7; div1++) {
+ for (div2 = 1; div2 <= div1; div2++) {
+ calc_rate = DIV_ROUND_CLOSEST(parent_rate, div1 * div2);
+ rate_diff = abs_diff(calc_rate, rate);
+
+ if (calc_rate == rate) {
+ best_div1 = div1;
+ best_div2 = div2;
+ goto done;
+ } else if (rate_diff < best_rate_diff) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate_diff = rate_diff;
+ }
+ }
+ }
+
+done:
+ *divider1 = best_div1;
+ *divider2 = best_div2;
+}
+
+static int rp1_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll->clockman;
+ const struct rp1_pll_data *data = pll->data;
+
+ u32 prim, prim_div1, prim_div2;
+
+ get_pll_prim_dividers(rate, parent_rate, &prim_div1, &prim_div2);
+
+ spin_lock(&clockman->regs_lock);
+ prim = clockman_read(clockman, data->ctrl_reg);
+ prim &= ~PLL_PRIM_DIV1_MASK;
+ prim |= FIELD_PREP(PLL_PRIM_DIV1_MASK, prim_div1);
+ prim &= ~PLL_PRIM_DIV2_MASK;
+ prim |= FIELD_PREP(PLL_PRIM_DIV2_MASK, prim_div2);
+ clockman_write(clockman, data->ctrl_reg, prim);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll->clockman;
+ const struct rp1_pll_data *data = pll->data;
+ u32 prim, prim_div1, prim_div2;
+
+ prim = clockman_read(clockman, data->ctrl_reg);
+ prim_div1 = FIELD_GET(PLL_PRIM_DIV1_MASK, prim);
+ prim_div2 = FIELD_GET(PLL_PRIM_DIV2_MASK, prim);
+
+ if (!prim_div1 || !prim_div2) {
+ dev_err(clockman->dev, "%s: (%s) zero divider value\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+ }
+
+ return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2);
+}
+
+static int rp1_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ u32 div1, div2;
+
+ if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate)
+ req->best_parent_rate = clk_audio_core->cached_rate;
+
+ get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2);
+
+ req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2);
+
+ return 0;
+}
+
+static int rp1_pll_ph_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ return !!(clockman_read(clockman, data->ph_reg) & PLL_PH_EN);
+}
+
+static int rp1_pll_ph_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+ u32 ph_reg;
+
+ spin_lock(&clockman->regs_lock);
+ ph_reg = clockman_read(clockman, data->ph_reg);
+ ph_reg |= data->phase << PLL_PH_PHASE_SHIFT;
+ ph_reg |= PLL_PH_EN;
+ clockman_write(clockman, data->ph_reg, ph_reg);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_pll_ph_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ph_reg,
+ clockman_read(clockman, data->ph_reg) & ~PLL_PH_EN);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static unsigned long rp1_pll_ph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ return parent_rate / data->fixed_divider;
+}
+
+static int rp1_pll_ph_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ req->rate = req->best_parent_rate / data->fixed_divider;
+
+ return 0;
+}
+
+static int rp1_pll_divider_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ return !(clockman_read(clockman, data->ctrl_reg) & PLL_SEC_RST);
+}
+
+static int rp1_pll_divider_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ spin_lock(&clockman->regs_lock);
+ /* Check the implementation bit is set! */
+ WARN_ON(!(clockman_read(clockman, data->ctrl_reg) & PLL_SEC_IMPL));
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) & ~PLL_SEC_RST);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_pll_divider_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) | PLL_SEC_RST);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static int rp1_pll_divider_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+ u32 div, sec;
+
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+ div = clamp(div, 8u, 19u);
+
+ spin_lock(&clockman->regs_lock);
+ sec = clockman_read(clockman, data->ctrl_reg);
+ sec &= ~PLL_SEC_DIV_MASK;
+ sec |= FIELD_PREP(PLL_SEC_DIV_MASK, div);
+
+ /* Must keep the divider in reset to change the value. */
+ sec |= PLL_SEC_RST;
+ clockman_write(clockman, data->ctrl_reg, sec);
+
+	/* Must wait (busy-wait, not sleep) 10 PLL VCO cycles. */
+ ndelay(div64_ul(10ULL * div * NSEC_PER_SEC, parent_rate));
+
+ sec &= ~PLL_SEC_RST;
+ clockman_write(clockman, data->ctrl_reg, sec);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int rp1_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+	return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int rp1_clock_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ return !!(clockman_read(clockman, data->ctrl_reg) & CLK_CTRL_ENABLE);
+}
+
+static unsigned long rp1_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u64 calc_rate;
+ u64 div;
+ u32 frac;
+
+ div = clockman_read(clockman, data->div_int_reg);
+ frac = (data->div_frac_reg != 0) ?
+ clockman_read(clockman, data->div_frac_reg) : 0;
+
+ /* If the integer portion of the divider is 0, treat it as 2^16 */
+ if (!div)
+ div = 1 << 16;
+
+ div = (div << CLK_DIV_FRAC_BITS) | (frac >> (32 - CLK_DIV_FRAC_BITS));
+
+ calc_rate = (u64)parent_rate << CLK_DIV_FRAC_BITS;
+ calc_rate = div64_u64(calc_rate, div);
+
+ return calc_rate;
+}
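
The divisor here is a 16.16-style fixed-point value (CLK_DIV_FRAC_BITS = 16): DIV_INT supplies the integer part (0 reads back as 2^16), and the top 16 bits of the DIV_FRAC register supply the fraction. Worked example (assumed values): parent = 200 MHz, DIV_INT = 3, DIV_FRAC = 0x80000000 (fraction 0.5) gives div = 3.5, hence rate = 200 MHz / 3.5, roughly 57.14 MHz.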
+
+static int rp1_clock_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) | CLK_CTRL_ENABLE);
+ /* If this is a GPCLK, turn on the output-enable */
+ if (data->oe_mask)
+ clockman_write(clockman, GPCLK_OE_CTRL,
+ clockman_read(clockman, GPCLK_OE_CTRL) | data->oe_mask);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_clock_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) & ~CLK_CTRL_ENABLE);
+ /* If this is a GPCLK, turn off the output-enable */
+ if (data->oe_mask)
+ clockman_write(clockman, GPCLK_OE_CTRL,
+ clockman_read(clockman, GPCLK_OE_CTRL) & ~data->oe_mask);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static u32 rp1_clock_choose_div(unsigned long rate, unsigned long parent_rate,
+ const struct rp1_clock_data *data)
+{
+ u64 div;
+
+ /*
+ * Due to earlier rounding, calculated parent_rate may differ from
+ * expected value. Don't fail on a small discrepancy near unity divide.
+ */
+ if (!rate || rate > parent_rate + (parent_rate >> CLK_DIV_FRAC_BITS))
+ return 0;
+
+ /*
+ * Always express div in fixed-point format for fractional division;
+ * If no fractional divider is present, the fraction part will be zero.
+ */
+ if (data->div_frac_reg) {
+ div = (u64)parent_rate << CLK_DIV_FRAC_BITS;
+ div = DIV_ROUND_CLOSEST_ULL(div, rate);
+ } else {
+ div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate);
+ div <<= CLK_DIV_FRAC_BITS;
+ }
+
+ div = clamp(div,
+ 1ull << CLK_DIV_FRAC_BITS,
+ (u64)data->div_int_max << CLK_DIV_FRAC_BITS);
+
+ return div;
+}
+
+static u8 rp1_clock_get_parent(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 sel, ctrl;
+ u8 parent;
+
+ /* Sel is one-hot, so find the first bit set */
+ sel = clockman_read(clockman, data->sel_reg);
+ parent = ffs(sel) - 1;
+
+ /* sel == 0 implies the parent clock is not enabled yet. */
+ if (!sel) {
+ /* Read the clock src from the CTRL register instead */
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+ parent = (ctrl & data->clk_src_mask) >> CLK_CTRL_SRC_SHIFT;
+ }
+
+ if (parent >= data->num_std_parents)
+ parent = AUX_SEL;
+
+ if (parent == AUX_SEL) {
+ /*
+ * Clock parent is an auxiliary source, so get the parent from
+ * the AUXSRC register field.
+ */
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+ parent = FIELD_GET(CLK_CTRL_AUXSRC_MASK, ctrl);
+ parent += data->num_std_parents;
+ }
+
+ return parent;
+}
+
+static int rp1_clock_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 ctrl, sel;
+
+ spin_lock(&clockman->regs_lock);
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+
+ if (index >= data->num_std_parents) {
+ /* This is an aux source request */
+ if (index >= data->num_std_parents + data->num_aux_parents) {
+ spin_unlock(&clockman->regs_lock);
+ return -EINVAL;
+ }
+
+ /* Select parent from aux list */
+ ctrl &= ~CLK_CTRL_AUXSRC_MASK;
+ ctrl |= FIELD_PREP(CLK_CTRL_AUXSRC_MASK, index - data->num_std_parents);
+ /* Set src to aux list */
+ ctrl &= ~data->clk_src_mask;
+ ctrl |= (AUX_SEL << CLK_CTRL_SRC_SHIFT) & data->clk_src_mask;
+ } else {
+ ctrl &= ~data->clk_src_mask;
+ ctrl |= (index << CLK_CTRL_SRC_SHIFT) & data->clk_src_mask;
+ }
+
+ clockman_write(clockman, data->ctrl_reg, ctrl);
+ spin_unlock(&clockman->regs_lock);
+
+ sel = rp1_clock_get_parent(hw);
+ if (sel != index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rp1_clock_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 parent)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 div = rp1_clock_choose_div(rate, parent_rate, data);
+
+ spin_lock(&clockman->regs_lock);
+
+ clockman_write(clockman, data->div_int_reg, div >> CLK_DIV_FRAC_BITS);
+ if (data->div_frac_reg)
+ clockman_write(clockman, data->div_frac_reg, div << (32 - CLK_DIV_FRAC_BITS));
+
+ spin_unlock(&clockman->regs_lock);
+
+ if (parent != 0xff)
+ return rp1_clock_set_parent(hw, parent);
+
+ return 0;
+}
+
+static int rp1_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff);
+}
+
+static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw,
+ unsigned long target_rate,
+ int *pdiv_prim, int *pdiv_clk)
+{
+ static const int prim_divs[] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16,
+ 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49,
+ };
+ const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc);
+ const unsigned long core_min = xosc_rate * 16;
+ const unsigned long core_max = 2400000000;
+ int best_div_prim = 1, best_div_clk = 1;
+ unsigned long best_rate = core_max + 1;
+ unsigned long core_rate = 0;
+ int div_int, div_frac;
+ u64 div;
+ int i;
+
+ /* Given the target rate, choose a set of divisors/multipliers */
+ for (i = 0; i < ARRAY_SIZE(prim_divs); i++) {
+ int div_prim = prim_divs[i];
+ int div_clk;
+
+ for (div_clk = 1; div_clk <= 256; div_clk++) {
+ core_rate = target_rate * div_clk * div_prim;
+ if (core_rate >= core_min) {
+ if (core_rate < best_rate) {
+ best_rate = core_rate;
+ best_div_prim = div_prim;
+ best_div_clk = div_clk;
+ }
+ break;
+ }
+ }
+ }
+
+ if (best_rate < core_max) {
+ div = ((best_rate << 24) + xosc_rate / 2) / xosc_rate;
+ div_int = div >> 24;
+ div_frac = div % (1 << 24);
+ core_rate = (xosc_rate * ((div_int << 24) + div_frac) + (1 << 23)) >> 24;
+ } else {
+ core_rate = 0;
+ }
+
+ if (pdiv_prim)
+ *pdiv_prim = best_div_prim;
+ if (pdiv_clk)
+ *pdiv_clk = best_div_clk;
+
+ return core_rate;
+}
+
+static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
+ int parent_idx,
+ unsigned long rate,
+ unsigned long *prate,
+ unsigned long *calc_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_clock_data *data = clock->data;
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ struct clk_hw *clk_i2s_hw = &clk_i2s->hw;
+ struct clk_hw *parent;
+ u32 div;
+ u64 tmp;
+
+ parent = clk_hw_get_parent_by_index(hw, parent_idx);
+
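+	/*
+	 * Fast path: if the same I2S rate is requested again with the
+	 * audio PLL as parent, reuse the rates cached by the path below.
+	 */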
+ if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) {
+ *prate = clk_audio->cached_rate;
+ *calc_rate = rate;
+ return;
+ }
+
+ if (hw == clk_i2s_hw && parent == clk_audio_hw) {
+ unsigned long core_rate, audio_rate, i2s_rate;
+ int div_prim, div_clk;
+
+ core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk);
+ audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim);
+ i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk);
+ clk_audio_core->cached_rate = core_rate;
+ clk_audio->cached_rate = audio_rate;
+ clk_i2s->cached_rate = i2s_rate;
+ *prate = audio_rate;
+ *calc_rate = i2s_rate;
+ return;
+ }
+
+ *prate = clk_hw_get_rate(parent);
+ div = rp1_clock_choose_div(rate, *prate, data);
+
+ if (!div) {
+ *calc_rate = 0;
+ return;
+ }
+
+ /* Recalculate to account for rounding errors */
+ tmp = (u64)*prate << CLK_DIV_FRAC_BITS;
+ tmp = div_u64(tmp, div);
+
+ /*
+ * Prevent overclocks - if all parent choices result in
+ * a downstream clock in excess of the maximum, then the
+ * call to set the clock will fail.
+ */
+ if (tmp > data->max_freq)
+ *calc_rate = 0;
+ else
+ *calc_rate = tmp;
+}
+
+static int rp1_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent, *best_parent = NULL;
+ unsigned long best_rate = 0;
+ unsigned long best_prate = 0;
+ unsigned long best_rate_diff = ULONG_MAX;
+ unsigned long prate, calc_rate;
+ size_t i;
+
+ /*
+	 * If the NO_REPARENT flag is set, try to use the existing parent.
+ */
+ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT)) {
+ i = rp1_clock_get_parent(hw);
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (parent) {
+ rp1_clock_choose_div_and_prate(hw, i, req->rate, &prate,
+ &calc_rate);
+ if (calc_rate > 0) {
+ req->best_parent_hw = parent;
+ req->best_parent_rate = prate;
+ req->rate = calc_rate;
+ return 0;
+ }
+ }
+ }
+
+ /*
+	 * Select the parent clock that results in the closest rate (lower
+	 * or higher).
+ */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ rp1_clock_choose_div_and_prate(hw, i, req->rate, &prate,
+ &calc_rate);
+
+ if (abs_diff(calc_rate, req->rate) < best_rate_diff) {
+ best_parent = parent;
+ best_prate = prate;
+ best_rate = calc_rate;
+ best_rate_diff = abs_diff(calc_rate, req->rate);
+
+ if (best_rate_diff == 0)
+ break;
+ }
+ }
+
+ if (best_rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best_prate;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int rp1_varsrc_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ /*
+	 * "varsrc" exists purely to let clock dividers know the frequency
+	 * of an externally managed clock source (such as the MIPI DSI byte
+	 * clock), which may change at run time as a side effect of another
+	 * driver.
+ */
+ clock->cached_rate = rate;
+ return 0;
+}
+
+static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ return clock->cached_rate;
+}
+
+static int rp1_varsrc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return 0;
+}
+
+static const struct clk_ops rp1_pll_core_ops = {
+ .is_prepared = rp1_pll_core_is_on,
+ .prepare = rp1_pll_core_on,
+ .unprepare = rp1_pll_core_off,
+ .set_rate = rp1_pll_core_set_rate,
+ .recalc_rate = rp1_pll_core_recalc_rate,
+ .determine_rate = rp1_pll_core_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_ops = {
+ .set_rate = rp1_pll_set_rate,
+ .recalc_rate = rp1_pll_recalc_rate,
+ .determine_rate = rp1_pll_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_ph_ops = {
+ .is_prepared = rp1_pll_ph_is_on,
+ .prepare = rp1_pll_ph_on,
+ .unprepare = rp1_pll_ph_off,
+ .recalc_rate = rp1_pll_ph_recalc_rate,
+ .determine_rate = rp1_pll_ph_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_divider_ops = {
+ .is_prepared = rp1_pll_divider_is_on,
+ .prepare = rp1_pll_divider_on,
+ .unprepare = rp1_pll_divider_off,
+ .set_rate = rp1_pll_divider_set_rate,
+ .recalc_rate = rp1_pll_divider_recalc_rate,
+ .determine_rate = rp1_pll_divider_determine_rate,
+};
+
+static const struct clk_ops rp1_clk_ops = {
+ .is_prepared = rp1_clock_is_on,
+ .prepare = rp1_clock_on,
+ .unprepare = rp1_clock_off,
+ .recalc_rate = rp1_clock_recalc_rate,
+ .get_parent = rp1_clock_get_parent,
+ .set_parent = rp1_clock_set_parent,
+ .set_rate_and_parent = rp1_clock_set_rate_and_parent,
+ .set_rate = rp1_clock_set_rate,
+ .determine_rate = rp1_clock_determine_rate,
+};
+
+static const struct clk_ops rp1_varsrc_ops = {
+ .set_rate = rp1_varsrc_set_rate,
+ .recalc_rate = rp1_varsrc_recalc_rate,
+ .determine_rate = rp1_varsrc_determine_rate,
+};
+
+static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ int ret;
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->hw;
+}
+
+static struct clk_hw *rp1_register_pll_divider(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ const struct rp1_pll_data *divider_data = desc->data;
+ int ret;
+
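+	/* Derive the divider field's shift and width from PLL_SEC_DIV_MASK */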
+ desc->div.reg = clockman->regs + divider_data->ctrl_reg;
+ desc->div.shift = __ffs(PLL_SEC_DIV_MASK);
+ desc->div.width = __ffs(~(PLL_SEC_DIV_MASK >> desc->div.shift));
+ desc->div.flags = CLK_DIVIDER_ROUND_CLOSEST;
+ desc->div.lock = &clockman->regs_lock;
+ desc->div.hw.init = desc->hw.init;
+ desc->div.table = pll_sec_div_table;
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->div.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->div.hw;
+}
+
+static struct clk_hw *rp1_register_clock(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ const struct rp1_clock_data *clock_data = desc->data;
+ int ret;
+
+ if (WARN_ON_ONCE(MAX_CLK_PARENTS <
+ clock_data->num_std_parents + clock_data->num_aux_parents))
+ return ERR_PTR(-EINVAL);
+
+ /* There must be a gap for the AUX selector */
+ if (WARN_ON_ONCE(clock_data->num_std_parents > AUX_SEL &&
+ desc->hw.init->parent_data[AUX_SEL].index != -1))
+ return ERR_PTR(-EINVAL);
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->hw;
+}
+
+/* Assignment helper macros for different clock types. */
+#define _REGISTER(f, ...) { .clk_register = f, __VA_ARGS__ }
+
+#define CLK_DATA(type, ...) .data = &(struct type) { __VA_ARGS__ }
+
+#define REGISTER_PLL(...) _REGISTER(&rp1_register_pll, \
+ __VA_ARGS__)
+
+#define REGISTER_PLL_DIV(...) _REGISTER(&rp1_register_pll_divider, \
+ __VA_ARGS__)
+
+#define REGISTER_CLK(...) _REGISTER(&rp1_register_clock, \
+ __VA_ARGS__)
+
+static struct rp1_clk_desc pll_sys_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_SYS_CS,
+ .pwr_reg = PLL_SYS_PWR,
+ .fbdiv_int_reg = PLL_SYS_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_SYS_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_audio_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_AUDIO_CS,
+ .pwr_reg = PLL_AUDIO_PWR,
+ .fbdiv_int_reg = PLL_AUDIO_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_AUDIO_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_video_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_VIDEO_CS,
+ .pwr_reg = PLL_VIDEO_PWR,
+ .fbdiv_int_reg = PLL_VIDEO_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_VIDEO_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_sys_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_SYS_PRIM,
+ .fc0_src = FC_NUM(0, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_PRIM,
+ .fc0_src = FC_NUM(4, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_PRIM,
+ .fc0_src = FC_NUM(3, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_SYS_SEC,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_SEC,
+ .fc0_src = FC_NUM(5, 3),
+ )
+);
+
+static const struct clk_parent_data clk_eth_tsu_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_eth_tsu",
+ clk_eth_tsu_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = CLK_ETH_TSU_CTRL,
+ .div_int_reg = CLK_ETH_TSU_DIV_INT,
+ .sel_reg = CLK_ETH_TSU_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 7),
+ )
+);
+
+static const struct clk_parent_data clk_eth_parents[] = {
+ { .hw = &pll_sys_sec_desc.div.hw },
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_eth",
+ clk_eth_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_ETH_CTRL,
+ .div_int_reg = CLK_ETH_DIV_INT,
+ .sel_reg = CLK_ETH_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 125 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 6),
+ )
+);
+
+static const struct clk_parent_data clk_sys_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sys",
+ clk_sys_parents,
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 3,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SYS_CTRL,
+ .div_int_reg = CLK_SYS_DIV_INT,
+ .sel_reg = CLK_SYS_SEL,
+ .div_int_max = DIV_INT_24BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 4),
+ .clk_src_mask = 0x3,
+ )
+);
+
+static struct rp1_clk_desc pll_sys_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_SYS_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(1, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_AUDIO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_VIDEO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(4, 3),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_SEC,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_tern",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_TERN,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_slow_sys",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SLOW_SYS_CTRL,
+ .div_int_reg = CLK_SLOW_SYS_DIV_INT,
+ .sel_reg = CLK_SLOW_SYS_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 4),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static const struct clk_parent_data clk_dma_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dma",
+ clk_dma_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_DMA_CTRL,
+ .div_int_reg = CLK_DMA_DIV_INT,
+ .sel_reg = CLK_DMA_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_uart",
+ clk_uart_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_UART_CTRL,
+ .div_int_reg = CLK_UART_DIV_INT,
+ .sel_reg = CLK_UART_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(6, 7),
+ )
+);
+
+static const struct clk_parent_data clk_pwm0_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm0",
+ clk_pwm0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM0_CTRL,
+ .div_int_reg = CLK_PWM0_DIV_INT,
+ .div_frac_reg = CLK_PWM0_DIV_FRAC,
+ .sel_reg = CLK_PWM0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(0, 5),
+ )
+);
+
+static const struct clk_parent_data clk_pwm1_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm1",
+ clk_pwm1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM1_CTRL,
+ .div_int_reg = CLK_PWM1_DIV_INT,
+ .div_frac_reg = CLK_PWM1_DIV_FRAC,
+ .sel_reg = CLK_PWM1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(1, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_in_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_in",
+ clk_audio_in_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 5,
+ .ctrl_reg = CLK_AUDIO_IN_CTRL,
+ .div_int_reg = CLK_AUDIO_IN_DIV_INT,
+ .sel_reg = CLK_AUDIO_IN_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(2, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_out_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_out",
+ clk_audio_out_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 4,
+ .ctrl_reg = CLK_AUDIO_OUT_CTRL,
+ .div_int_reg = CLK_AUDIO_OUT_DIV_INT,
+ .sel_reg = CLK_AUDIO_OUT_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 153600 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(3, 5),
+ )
+);
+
+static const struct clk_parent_data clk_i2s_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_audio_desc.hw },
+ { .hw = &pll_audio_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_i2s",
+ clk_i2s_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_I2S_CTRL,
+ .div_int_reg = CLK_I2S_DIV_INT,
+ .sel_reg = CLK_I2S_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI0_CFG_CTRL,
+ .div_int_reg = CLK_MIPI0_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI0_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI1_CFG_CTRL,
+ .div_int_reg = CLK_MIPI1_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI1_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 6),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_adc",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_ADC_CTRL,
+ .div_int_reg = CLK_ADC_DIV_INT,
+ .sel_reg = CLK_ADC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_timer",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_TIMER_CTRL,
+ .div_int_reg = CLK_SDIO_TIMER_DIV_INT,
+ .sel_reg = CLK_SDIO_TIMER_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_alt_src",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL,
+ .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT,
+ .sel_reg = CLK_SDIO_ALT_SRC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 4),
+ )
+);
+
+static const struct clk_parent_data clk_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dpi",
+ clk_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_DPI_DIV_INT,
+ .sel_reg = VIDEO_CLK_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp0_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_i2s_desc.hw },
+ { .hw = &clk_adc_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp0",
+ clk_gp0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(0),
+ .ctrl_reg = CLK_GP0_CTRL,
+ .div_int_reg = CLK_GP0_DIV_INT,
+ .div_frac_reg = CLK_GP0_DIV_FRAC,
+ .sel_reg = CLK_GP0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp1_parents[] = {
+ { .hw = &clk_sdio_timer_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_adc_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp1",
+ clk_gp1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(1),
+ .ctrl_reg = CLK_GP1_CTRL,
+ .div_int_reg = CLK_GP1_DIV_INT,
+ .div_frac_reg = CLK_GP1_DIV_FRAC,
+ .sel_reg = CLK_GP1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 1),
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi0_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi1_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static const struct clk_parent_data clk_mipi0_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_dpi",
+ clk_mipi0_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 6),
+ )
+);
+
+static const struct clk_parent_data clk_mipi1_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_dpi",
+ clk_mipi1_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp2_parents[] = {
+ { .hw = &clk_sdio_alt_src_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_sec_desc.hw },
+ { .index = -1 },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clk_audio_in_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .hw = &clk_pwm1_desc.hw },
+ { .hw = &clk_mipi0_dpi_desc.hw },
+ { .hw = &clk_mipi1_cfg_desc.hw },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp2",
+ clk_gp2_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(2),
+ .ctrl_reg = CLK_GP2_CTRL,
+ .div_int_reg = CLK_GP2_DIV_INT,
+ .div_frac_reg = CLK_GP2_DIV_FRAC,
+ .sel_reg = CLK_GP2_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp3_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_pri_ph_desc.hw },
+ { .hw = &clk_audio_out_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi1_dpi_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp3",
+ clk_gp3_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(3),
+ .ctrl_reg = CLK_GP3_CTRL,
+ .div_int_reg = CLK_GP3_DIV_INT,
+ .div_frac_reg = CLK_GP3_DIV_FRAC,
+ .sel_reg = CLK_GP3_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp4_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi0_cfg_desc.hw },
+ { .hw = &clk_uart_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp4",
+ clk_gp4_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(4),
+ .ctrl_reg = CLK_GP4_CTRL,
+ .div_int_reg = CLK_GP4_DIV_INT,
+ .div_frac_reg = CLK_GP4_DIV_FRAC,
+ .sel_reg = CLK_GP4_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 1),
+ )
+);
+
+static const struct clk_parent_data clk_vec_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_vec",
+ clk_vec_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_VEC_CTRL,
+ .div_int_reg = VIDEO_CLK_VEC_DIV_INT,
+ .sel_reg = VIDEO_CLK_VEC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 108 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp5_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &clk_eth_tsu_desc.hw },
+ { .index = -1 },
+ { .hw = &clk_vec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp5",
+ clk_gp5_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(5),
+ .ctrl_reg = CLK_GP5_CTRL,
+ .div_int_reg = CLK_GP5_DIV_INT,
+ .div_frac_reg = CLK_GP5_DIV_FRAC,
+ .sel_reg = CLK_GP5_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc *const clk_desc_array[] = {
+ [RP1_PLL_SYS_CORE] = &pll_sys_core_desc,
+ [RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc,
+ [RP1_PLL_VIDEO_CORE] = &pll_video_core_desc,
+ [RP1_PLL_SYS] = &pll_sys_desc,
+ [RP1_CLK_ETH_TSU] = &clk_eth_tsu_desc,
+ [RP1_CLK_ETH] = &clk_eth_desc,
+ [RP1_CLK_SYS] = &clk_sys_desc,
+ [RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc,
+ [RP1_PLL_SYS_SEC] = &pll_sys_sec_desc,
+ [RP1_PLL_AUDIO] = &pll_audio_desc,
+ [RP1_PLL_VIDEO] = &pll_video_desc,
+ [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc,
+ [RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc,
+ [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc,
+ [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc,
+ [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc,
+ [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc,
+ [RP1_CLK_DMA] = &clk_dma_desc,
+ [RP1_CLK_UART] = &clk_uart_desc,
+ [RP1_CLK_PWM0] = &clk_pwm0_desc,
+ [RP1_CLK_PWM1] = &clk_pwm1_desc,
+ [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc,
+ [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc,
+ [RP1_CLK_I2S] = &clk_i2s_desc,
+ [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc,
+ [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc,
+ [RP1_CLK_ADC] = &clk_adc_desc,
+ [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc,
+ [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc,
+ [RP1_CLK_GP0] = &clk_gp0_desc,
+ [RP1_CLK_GP1] = &clk_gp1_desc,
+ [RP1_CLK_GP2] = &clk_gp2_desc,
+ [RP1_CLK_GP3] = &clk_gp3_desc,
+ [RP1_CLK_GP4] = &clk_gp4_desc,
+ [RP1_CLK_GP5] = &clk_gp5_desc,
+ [RP1_CLK_VEC] = &clk_vec_desc,
+ [RP1_CLK_DPI] = &clk_dpi_desc,
+ [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc,
+ [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc,
+ [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc,
+ [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc,
+};
+
+static const struct regmap_range rp1_reg_ranges[] = {
+ regmap_reg_range(PLL_SYS_CS, PLL_SYS_SEC),
+ regmap_reg_range(PLL_AUDIO_CS, PLL_AUDIO_TERN),
+ regmap_reg_range(PLL_VIDEO_CS, PLL_VIDEO_SEC),
+ regmap_reg_range(GPCLK_OE_CTRL, GPCLK_OE_CTRL),
+ regmap_reg_range(CLK_SYS_CTRL, CLK_SYS_DIV_INT),
+ regmap_reg_range(CLK_SYS_SEL, CLK_SYS_SEL),
+ regmap_reg_range(CLK_SLOW_SYS_CTRL, CLK_SLOW_SYS_DIV_INT),
+ regmap_reg_range(CLK_SLOW_SYS_SEL, CLK_SLOW_SYS_SEL),
+ regmap_reg_range(CLK_DMA_CTRL, CLK_DMA_DIV_INT),
+ regmap_reg_range(CLK_DMA_SEL, CLK_DMA_SEL),
+ regmap_reg_range(CLK_UART_CTRL, CLK_UART_DIV_INT),
+ regmap_reg_range(CLK_UART_SEL, CLK_UART_SEL),
+ regmap_reg_range(CLK_ETH_CTRL, CLK_ETH_DIV_INT),
+ regmap_reg_range(CLK_ETH_SEL, CLK_ETH_SEL),
+ regmap_reg_range(CLK_PWM0_CTRL, CLK_PWM0_SEL),
+ regmap_reg_range(CLK_PWM1_CTRL, CLK_PWM1_SEL),
+ regmap_reg_range(CLK_AUDIO_IN_CTRL, CLK_AUDIO_IN_DIV_INT),
+ regmap_reg_range(CLK_AUDIO_IN_SEL, CLK_AUDIO_IN_SEL),
+ regmap_reg_range(CLK_AUDIO_OUT_CTRL, CLK_AUDIO_OUT_DIV_INT),
+ regmap_reg_range(CLK_AUDIO_OUT_SEL, CLK_AUDIO_OUT_SEL),
+ regmap_reg_range(CLK_I2S_CTRL, CLK_I2S_DIV_INT),
+ regmap_reg_range(CLK_I2S_SEL, CLK_I2S_SEL),
+ regmap_reg_range(CLK_MIPI0_CFG_CTRL, CLK_MIPI0_CFG_DIV_INT),
+ regmap_reg_range(CLK_MIPI0_CFG_SEL, CLK_MIPI0_CFG_SEL),
+ regmap_reg_range(CLK_MIPI1_CFG_CTRL, CLK_MIPI1_CFG_DIV_INT),
+ regmap_reg_range(CLK_MIPI1_CFG_SEL, CLK_MIPI1_CFG_SEL),
+ regmap_reg_range(CLK_PCIE_AUX_CTRL, CLK_PCIE_AUX_DIV_INT),
+ regmap_reg_range(CLK_PCIE_AUX_SEL, CLK_PCIE_AUX_SEL),
+ regmap_reg_range(CLK_USBH0_MICROFRAME_CTRL, CLK_USBH0_MICROFRAME_DIV_INT),
+ regmap_reg_range(CLK_USBH0_MICROFRAME_SEL, CLK_USBH0_MICROFRAME_SEL),
+ regmap_reg_range(CLK_USBH1_MICROFRAME_CTRL, CLK_USBH1_MICROFRAME_DIV_INT),
+ regmap_reg_range(CLK_USBH1_MICROFRAME_SEL, CLK_USBH1_MICROFRAME_SEL),
+ regmap_reg_range(CLK_USBH0_SUSPEND_CTRL, CLK_USBH0_SUSPEND_DIV_INT),
+ regmap_reg_range(CLK_USBH0_SUSPEND_SEL, CLK_USBH0_SUSPEND_SEL),
+ regmap_reg_range(CLK_USBH1_SUSPEND_CTRL, CLK_USBH1_SUSPEND_DIV_INT),
+ regmap_reg_range(CLK_USBH1_SUSPEND_SEL, CLK_USBH1_SUSPEND_SEL),
+ regmap_reg_range(CLK_ETH_TSU_CTRL, CLK_ETH_TSU_DIV_INT),
+ regmap_reg_range(CLK_ETH_TSU_SEL, CLK_ETH_TSU_SEL),
+ regmap_reg_range(CLK_ADC_CTRL, CLK_ADC_DIV_INT),
+ regmap_reg_range(CLK_ADC_SEL, CLK_ADC_SEL),
+ regmap_reg_range(CLK_SDIO_TIMER_CTRL, CLK_SDIO_TIMER_DIV_INT),
+ regmap_reg_range(CLK_SDIO_TIMER_SEL, CLK_SDIO_TIMER_SEL),
+ regmap_reg_range(CLK_SDIO_ALT_SRC_CTRL, CLK_SDIO_ALT_SRC_DIV_INT),
+ regmap_reg_range(CLK_SDIO_ALT_SRC_SEL, CLK_SDIO_ALT_SRC_SEL),
+ regmap_reg_range(CLK_GP0_CTRL, CLK_GP0_SEL),
+ regmap_reg_range(CLK_GP1_CTRL, CLK_GP1_SEL),
+ regmap_reg_range(CLK_GP2_CTRL, CLK_GP2_SEL),
+ regmap_reg_range(CLK_GP3_CTRL, CLK_GP3_SEL),
+ regmap_reg_range(CLK_GP4_CTRL, CLK_GP4_SEL),
+ regmap_reg_range(CLK_GP5_CTRL, CLK_GP5_SEL),
+ regmap_reg_range(CLK_SYS_RESUS_CTRL, CLK_SYS_RESUS_CTRL),
+ regmap_reg_range(CLK_SLOW_SYS_RESUS_CTRL, CLK_SLOW_SYS_RESUS_CTRL),
+ regmap_reg_range(FC0_REF_KHZ, FC0_RESULT),
+ regmap_reg_range(VIDEO_CLK_VEC_CTRL, VIDEO_CLK_VEC_DIV_INT),
+ regmap_reg_range(VIDEO_CLK_VEC_SEL, VIDEO_CLK_DPI_DIV_INT),
+ regmap_reg_range(VIDEO_CLK_DPI_SEL, VIDEO_CLK_MIPI1_DPI_SEL),
+};
+
+static const struct regmap_access_table rp1_reg_table = {
+ .yes_ranges = rp1_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_reg_ranges),
+};
+
+static const struct regmap_config rp1_clk_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PLL_VIDEO_SEC,
+ .name = "rp1-clk",
+ .rd_table = &rp1_reg_table,
+ .disable_locking = true,
+};
+
+static int rp1_clk_probe(struct platform_device *pdev)
+{
+ const size_t asize = ARRAY_SIZE(clk_desc_array);
+ struct rp1_clk_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct rp1_clockman *clockman;
+ struct clk_hw **hws;
+ unsigned int i;
+
+ clockman = devm_kzalloc(dev, struct_size(clockman, onecell.hws, asize),
+ GFP_KERNEL);
+ if (!clockman)
+ return -ENOMEM;
+
+ spin_lock_init(&clockman->regs_lock);
+ clockman->dev = dev;
+
+ clockman->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clockman->regs))
+ return PTR_ERR(clockman->regs);
+
+ clockman->regmap = devm_regmap_init_mmio(dev, clockman->regs,
+ &rp1_clk_regmap_cfg);
+ if (IS_ERR(clockman->regmap)) {
+ dev_err_probe(dev, PTR_ERR(clockman->regmap),
+ "could not init clock regmap\n");
+ return PTR_ERR(clockman->regmap);
+ }
+
+ clockman->onecell.num = asize;
+ hws = clockman->onecell.hws;
+
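+	/* Register each described clock; entries without a registration hook or data are skipped */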
+ for (i = 0; i < asize; i++) {
+ desc = clk_desc_array[i];
+ if (desc && desc->clk_register && desc->data)
+ hws[i] = desc->clk_register(clockman, desc);
+ }
+
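+	/* Cache the handles used by calc_core_pll_rate() and the I2S rate fast path */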
+ clk_audio_core = &pll_audio_core_desc;
+ clk_audio = &pll_audio_desc;
+ clk_i2s = &clk_i2s_desc;
+ clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0);
+
+ platform_set_drvdata(pdev, clockman);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &clockman->onecell);
+}
+
+static const struct of_device_id rp1_clk_of_match[] = {
+ { .compatible = "raspberrypi,rp1-clocks" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rp1_clk_of_match);
+
+static struct platform_driver rp1_clk_driver = {
+ .driver = {
+ .name = "rp1-clk",
+ .of_match_table = rp1_clk_of_match,
+ },
+ .probe = rp1_clk_probe,
+};
+
+module_platform_driver(rp1_clk_driver);
+
+MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
+MODULE_AUTHOR("Andrea della Porta <andrea.porta@suse.com>");
+MODULE_DESCRIPTION("RP1 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-rpmi.c b/drivers/clk/clk-rpmi.c
new file mode 100644
index 000000000000..921296aafa68
--- /dev/null
+++ b/drivers/clk/clk-rpmi.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V MPXY Based Clock Driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#define RPMI_CLK_DISCRETE_MAX_NUM_RATES 16
+#define RPMI_CLK_NAME_LEN 16
+
+#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw)
+
+enum rpmi_clk_config {
+ RPMI_CLK_DISABLE = 0,
+ RPMI_CLK_ENABLE = 1,
+ RPMI_CLK_CONFIG_MAX_IDX
+};
+
+#define RPMI_CLK_TYPE_MASK GENMASK(1, 0)
+enum rpmi_clk_type {
+ RPMI_CLK_DISCRETE = 0,
+ RPMI_CLK_LINEAR = 1,
+ RPMI_CLK_TYPE_MAX_IDX
+};
+
+struct rpmi_clk_context {
+ struct device *dev;
+ struct mbox_chan *chan;
+ struct mbox_client client;
+ u32 max_msg_data_size;
+};
+
+/*
+ * rpmi_clk_rates represents the rates in the format specified by the
+ * RPMI specification. No other representation (e.g., struct
+ * linear_range) is used, to avoid converting back and forth.
+ */
+union rpmi_clk_rates {
+ u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
+ struct {
+ u64 min;
+ u64 max;
+ u64 step;
+ } linear;
+};
+
+struct rpmi_clk {
+ struct rpmi_clk_context *context;
+ u32 id;
+ u32 num_rates;
+ u32 transition_latency;
+ enum rpmi_clk_type type;
+ union rpmi_clk_rates *rates;
+ char name[RPMI_CLK_NAME_LEN];
+ struct clk_hw hw;
+};
+
+struct rpmi_clk_rate_discrete {
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_clk_rate_linear {
+ __le32 min_lo;
+ __le32 min_hi;
+ __le32 max_lo;
+ __le32 max_hi;
+ __le32 step_lo;
+ __le32 step_hi;
+};
+
+struct rpmi_get_num_clocks_rx {
+ __le32 status;
+ __le32 num_clocks;
+};
+
+struct rpmi_get_attrs_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_attrs_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 num_rates;
+ __le32 transition_latency;
+ char name[RPMI_CLK_NAME_LEN];
+};
+
+struct rpmi_get_supp_rates_tx {
+ __le32 clkid;
+ __le32 clk_rate_idx;
+};
+
+struct rpmi_get_supp_rates_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 remaining;
+ __le32 returned;
+ __le32 rates[];
+};
+
+struct rpmi_get_rate_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_rate_rx {
+ __le32 status;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_tx {
+ __le32 clkid;
+ __le32 flags;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_rx {
+ __le32 status;
+};
+
+struct rpmi_set_config_tx {
+ __le32 clkid;
+ __le32 config;
+};
+
+struct rpmi_set_config_rx {
+ __le32 status;
+};
+
+static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
+{
+ return (((u64)(__hi) << 32) | (u32)(__lo));
+}
+
+static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
+{
+ struct rpmi_get_num_clocks_rx rx, *resp;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
+ NULL, 0, &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return 0;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp || resp->status)
+ return 0;
+
+ return le32_to_cpu(resp->num_clocks);
+}
+
+static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_attrs_tx tx;
+ struct rpmi_get_attrs_rx rx, *resp;
+ u8 format;
+ int ret;
+
+ tx.clkid = cpu_to_le32(clkid);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ rpmi_clk->id = clkid;
+ rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
+ rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
+ strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);
+
+ format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
+ if (format >= RPMI_CLK_TYPE_MAX_IDX)
+ return -EINVAL;
+
+ rpmi_clk->type = format;
+
+ return 0;
+}
+
+static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_clk_rate_discrete *rate_discrete;
+ struct rpmi_clk_rate_linear *rate_linear;
+ struct rpmi_get_supp_rates_tx tx;
+ struct rpmi_get_supp_rates_rx *resp;
+ struct rpmi_mbox_message msg;
+ size_t clk_rate_idx;
+ int ret, rateidx, j;
+
+ tx.clkid = cpu_to_le32(clkid);
+ tx.clk_rate_idx = 0;
+
+ /*
+	 * Make sure the allocated rx buffer is large enough to accommodate
+	 * all the rates sent in one RPMI message.
+ */
+ struct rpmi_get_supp_rates_rx *rx __free(kfree) =
+ kzalloc(context->max_msg_data_size, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx), rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;
+
+ for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) {
+ rpmi_clk->rates->discrete[rateidx] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
+ le32_to_cpu(rate_discrete[rateidx].lo));
+ }
+
+ /*
+ * Keep sending the request message until all
+ * the rates are received.
+ */
+ clk_rate_idx = 0;
+ while (le32_to_cpu(resp->remaining)) {
+ clk_rate_idx += le32_to_cpu(resp->returned);
+ tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);
+
+ rpmi_mbox_init_send_with_response(&msg,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx),
+ rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
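+			/* Don't store more rates than this batch is expected to carry */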
+ for (j = 0; j < le32_to_cpu(resp->returned); j++) {
+ if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned))
+ break;
+ rpmi_clk->rates->discrete[rateidx++] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
+ le32_to_cpu(rate_discrete[j].lo));
+ }
+ }
+ } else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
+ rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;
+
+ rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
+ le32_to_cpu(rate_linear->min_lo));
+ rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
+ le32_to_cpu(rate_linear->max_lo));
+ rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
+ le32_to_cpu(rate_linear->step_lo));
+ }
+
+ return 0;
+}
+
+static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_rate_tx tx;
+ struct rpmi_get_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
+}
+
+static int rpmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ u64 fmin, fmax, ftmp;
+
+ /*
+	 * Keep the requested rate if the clock uses the discrete format;
+	 * let the platform that actually controls the clock handle it.
+ */
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE)
+ return 0;
+
+ fmin = rpmi_clk->rates->linear.min;
+ fmax = rpmi_clk->rates->linear.max;
+
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+ return 0;
+ }
+
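+	/* Round the requested rate up to the next step boundary above fmin */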
+ ftmp = req->rate - fmin;
+ ftmp += rpmi_clk->rates->linear.step - 1;
+ do_div(ftmp, rpmi_clk->rates->linear.step);
+
+ req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;
+
+ return 0;
+}
+
+static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_rate_tx tx;
+ struct rpmi_set_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+ tx.lo = cpu_to_le32(lower_32_bits(rate));
+ tx.hi = cpu_to_le32(upper_32_bits(rate));
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static int rpmi_clk_enable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx, *resp;
+ int ret;
+
+ tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static void rpmi_clk_disable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx;
+
+ tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ rpmi_mbox_send_message(context->chan, &msg);
+}
+
+static const struct clk_ops rpmi_clk_ops = {
+ .recalc_rate = rpmi_clk_recalc_rate,
+ .determine_rate = rpmi_clk_determine_rate,
+ .set_rate = rpmi_clk_set_rate,
+ .prepare = rpmi_clk_enable,
+ .unprepare = rpmi_clk_disable,
+};
+
+static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
+{
+ struct device *dev = context->dev;
+ unsigned long min_rate, max_rate;
+ union rpmi_clk_rates *rates;
+ struct rpmi_clk *rpmi_clk;
+ struct clk_init_data init = {};
+ struct clk_hw *clk_hw;
+ int ret;
+
+ rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
+ if (!rpmi_clk)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk->context = context;
+ rpmi_clk->rates = rates;
+
+ ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to get clk-%u attributes\n",
+ clkid);
+
+ ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+					 "Failed to get supported rates for clk-%u\n",
+ clkid);
+
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+ init.ops = &rpmi_clk_ops;
+ init.name = rpmi_clk->name;
+ clk_hw = &rpmi_clk->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, clk_hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Unable to register clk-%u\n",
+ clkid);
+
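+	/* Constrain the clock to the rate range advertised by the platform */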
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ min_rate = rpmi_clk->rates->discrete[0];
+ max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1];
+ } else {
+ min_rate = rpmi_clk->rates->linear.min;
+ max_rate = rpmi_clk->rates->linear.max;
+ }
+
+ clk_hw_set_rate_range(clk_hw, min_rate, max_rate);
+
+ return clk_hw;
+}
+
+static void rpmi_clk_mbox_chan_release(void *data)
+{
+ struct mbox_chan *chan = data;
+
+ mbox_free_channel(chan);
+}
+
+static int rpmi_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int num_clocks, i;
+ struct clk_hw_onecell_data *clk_data;
+ struct rpmi_clk_context *context;
+ struct rpmi_mbox_message msg;
+ struct clk_hw *hw_ptr;
+ struct device *dev = &pdev->dev;
+
+ context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+ context->dev = dev;
+ platform_set_drvdata(pdev, context);
+
+ context->client.dev = context->dev;
+ context->client.rx_callback = NULL;
+ context->client.tx_block = false;
+ context->client.knows_txdone = true;
+ context->client.tx_tout = 0;
+
+ context->chan = mbox_request_channel(&context->client, 0);
+ if (IS_ERR(context->chan))
+ return PTR_ERR(context->chan);
+
+ ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");
+
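+	/*
+	 * Sanity-check the mailbox endpoint: the RPMI spec version, the
+	 * service group and the service group version must all match what
+	 * this driver expects.
+	 */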
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get spec version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group ID\n");
+ if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
+ return dev_err_probe(dev, -EINVAL,
+				     "service group mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_SRVGRP_CLOCK, msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+				     "service group version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get max message data size\n");
+
+ context->max_msg_data_size = msg.attr.value;
+ num_clocks = rpmi_clk_get_num_clocks(context);
+ if (!num_clocks)
+ return dev_err_probe(dev, -ENODEV, "No clocks found\n");
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
+ clk_data->num = num_clocks;
+
+ for (i = 0; i < clk_data->num; i++) {
+ hw_ptr = rpmi_clk_enumerate(context, i);
+ if (IS_ERR(hw_ptr)) {
+ return dev_err_probe(dev, PTR_ERR(hw_ptr),
+ "Failed to register clk-%d\n", i);
+ }
+ clk_data->hws[i] = hw_ptr;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id rpmi_clk_of_match[] = {
+ { .compatible = "riscv,rpmi-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);
+
+static struct platform_driver rpmi_clk_driver = {
+ .driver = {
+ .name = "riscv-rpmi-clock",
+ .of_match_table = rpmi_clk_of_match,
+ },
+ .probe = rpmi_clk_probe,
+};
+module_platform_driver(rpmi_clk_driver);
+
+MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
+MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 38c456540d1b..ff7ce12a5da6 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
@@ -137,7 +138,12 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ clk_data->num = S2MPS11_CLKS_NUM;
+
switch (hwid) {
+ case S2MPG10:
+ s2mps11_reg = S2MPG10_PMIC_RTCBUF;
+ break;
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
break;
@@ -186,7 +192,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
- clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);
@@ -220,6 +225,7 @@ static void s2mps11_clk_remove(struct platform_device *pdev)
}
static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mpg10-clk", S2MPG10},
{ "s2mps11-clk", S2MPS11X},
{ "s2mps13-clk", S2MPS13X},
{ "s2mps14-clk", S2MPS14X},
@@ -234,12 +240,15 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
* through platform_device_id.
*
* However if device's DT node contains proper clock compatible and driver is
- * built as a module, then the *module* matching will be done trough DT aliases.
+ * built as a module, then the *module* matching will be done through DT aliases.
* This requires of_device_id table. In the same time this will not change the
* actual *device* matching so do not add .of_match_table.
*/
static const struct of_device_id s2mps11_dt_match[] __used = {
{
+ .compatible = "samsung,s2mpg10-clk",
+ .data = (void *)S2MPG10,
+ }, {
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
}, {
@@ -263,7 +272,7 @@ static struct platform_driver s2mps11_clk_driver = {
.name = "s2mps11-clk",
},
.probe = s2mps11_clk_probe,
- .remove_new = s2mps11_clk_remove,
+ .remove = s2mps11_clk_remove,
.id_table = s2mps11_clk_id,
};
module_platform_driver(s2mps11_clk_driver);
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index d86a02563f6c..6b286ea6f121 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -54,8 +54,8 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -67,20 +67,27 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* running at then.
*/
if (clk->info->rate_discrete)
- return rate;
+ return 0;
fmin = clk->info->range.min_rate;
fmax = clk->info->range.max_rate;
- if (rate <= fmin)
- return fmin;
- else if (rate >= fmax)
- return fmax;
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
- ftmp = rate - fmin;
+ return 0;
+ }
+
+ ftmp = req->rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
do_div(ftmp, clk->info->range.step_size);
- return ftmp * clk->info->range.step_size + fmin;
+ req->rate = ftmp * clk->info->range.step_size + fmin;
+
+ return 0;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -119,15 +126,6 @@ static u8 scmi_clk_get_parent(struct clk_hw *hw)
return p_idx;
}
-static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
-{
- /*
- * Suppose all the requested rates are supported, and let firmware
- * to handle the left work.
- */
- return 0;
-}
-
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -156,13 +154,13 @@ static void scmi_clk_atomic_disable(struct clk_hw *hw)
scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}
-static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+static int __scmi_clk_is_enabled(struct clk_hw *hw, bool atomic)
{
int ret;
bool enabled = false;
struct scmi_clk *clk = to_scmi_clk(hw);
- ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
+ ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic);
if (ret)
dev_warn(clk->dev,
"Failed to get state for clock ID %d\n", clk->id);
@@ -170,6 +168,16 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
return !!enabled;
}
+static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+{
+ return __scmi_clk_is_enabled(hw, ATOMIC);
+}
+
+static int scmi_clk_is_enabled(struct clk_hw *hw)
+{
+ return __scmi_clk_is_enabled(hw, NOT_ATOMIC);
+}
+
static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
int ret;
@@ -285,10 +293,11 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
ops->is_enabled = scmi_clk_atomic_is_enabled;
+ else
+ ops->is_prepared = scmi_clk_is_enabled;
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
- ops->round_rate = scmi_clk_round_rate;
ops->determine_rate = scmi_clk_determine_rate;
if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
ops->set_rate = scmi_clk_set_rate;
@@ -337,6 +346,8 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
unsigned int atomic_threshold_us,
const struct clk_ops **clk_ops_db, size_t db_size)
{
+ int ret;
+ u32 val;
const struct scmi_clock_info *ci = sclk->info;
unsigned int feats_key = 0;
const struct clk_ops *ops;
@@ -358,8 +369,13 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
if (!ci->parent_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
- if (ci->extended_config)
- feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ if (ci->extended_config) {
+ ret = scmi_proto_clk_ops->config_oem_get(sclk->ph, sclk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ &val, NULL, false);
+ if (!ret)
+ feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ }
if (WARN_ON(feats_key >= db_size))
return NULL;
@@ -392,6 +408,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};
+ struct scmi_clk *sclks;
if (!handle)
return -ENODEV;
@@ -418,18 +435,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
transport_is_atomic = handle->is_transport_atomic(handle,
&atomic_threshold_us);
+ sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL);
+ if (!sclks)
+ return -ENOMEM;
+
+ for (idx = 0; idx < count; idx++)
+ hws[idx] = &sclks[idx].hw;
+
for (idx = 0; idx < count; idx++) {
- struct scmi_clk *sclk;
+ struct scmi_clk *sclk = &sclks[idx];
const struct clk_ops *scmi_ops;
- sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
- if (!sclk)
- return -ENOMEM;
-
sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
- devm_kfree(dev, sclk);
+ hws[idx] = NULL;
continue;
}
@@ -439,7 +459,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
/*
* Note that the scmi_clk_ops_db is on the stack, not global,
- * because it cannot be shared between mulitple probe-sequences
+ * because it cannot be shared between multiple probe-sequences
* to avoid sharing the devm_ allocated clk_ops between multiple
* SCMI clk driver instances.
*/
@@ -467,13 +487,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk->parent_data);
- devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
scmi_ops->enable ? " (atomic ops)" : "");
- hws[idx] = &sclk->hw;
}
}
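The clk-scmi hunks above, and most of the conversions that follow in this series, share one mechanical shape: a .round_rate op that took (rate, *parent_rate) and returned the rounded rate becomes a .determine_rate op that edits a struct clk_rate_request in place and returns 0, with negative returns reserved for hard failures. A minimal before/after sketch with a made-up "foo" clock and invented FOO_MIN/FOO_MAX limits:

/* Legacy form: the rounded rate travels through the return value. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	return clamp(rate, FOO_MIN, FOO_MAX);
}

/*
 * Converted form: the request carries rate, min_rate/max_rate
 * constraints and best_parent_rate/best_parent_hw, so ops that
 * negotiate with their parent update req->best_parent_rate instead
 * of *parent_rate.
 */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate = clamp(req->rate, FOO_MIN, FOO_MAX);

	return 0;
}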
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 108b697bd317..0b592de7bdb2 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -32,8 +32,8 @@ static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
return clk->scpi_ops->clk_get_val(clk->id);
}
-static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* We can't figure out what rate it will be, so just return the
@@ -41,7 +41,7 @@ static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
- return rate;
+ return 0;
}
static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,7 +54,7 @@ static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_clk_ops = {
.recalc_rate = scpi_clk_recalc_rate,
- .round_rate = scpi_clk_round_rate,
+ .determine_rate = scpi_clk_determine_rate,
.set_rate = scpi_clk_set_rate,
};
@@ -92,12 +92,14 @@ static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
return opp->freq;
}
-static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_dvfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct scpi_clk *clk = to_scpi_clk(hw);
- return __scpi_dvfs_round_rate(clk, rate);
+ req->rate = __scpi_dvfs_round_rate(clk, req->rate);
+
+ return 0;
}
static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
@@ -124,7 +126,7 @@ static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_dvfs_ops = {
.recalc_rate = scpi_dvfs_recalc_rate,
- .round_rate = scpi_dvfs_round_rate,
+ .determine_rate = scpi_dvfs_determine_rate,
.set_rate = scpi_dvfs_set_rate,
};
@@ -303,7 +305,7 @@ static struct platform_driver scpi_clocks_driver = {
.of_match_table = scpi_clocks_ids,
},
.probe = scpi_clocks_probe,
- .remove_new = scpi_clocks_remove,
+ .remove = scpi_clocks_remove,
};
module_platform_driver(scpi_clocks_driver);
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 6ee148e5469d..f61590d70575 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -227,20 +227,28 @@ static unsigned long si514_recalc_rate(struct clk_hw *hw,
return si514_calc_rate(&settings);
}
-static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si514_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si514_muldiv settings;
int err;
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- err = si514_calc_muldiv(&settings, rate);
- if (err)
- return err;
+ err = si514_calc_muldiv(&settings, req->rate);
+ if (err) {
+ req->rate = err;
- return si514_calc_rate(&settings);
+ return 0;
+ }
+
+ req->rate = si514_calc_rate(&settings);
+
+ return 0;
}
/*
@@ -289,7 +297,7 @@ static const struct clk_ops si514_clk_ops = {
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
- .round_rate = si514_round_rate,
+ .determine_rate = si514_determine_rate,
.set_rate = si514_set_rate,
};
@@ -371,7 +379,7 @@ static int si514_probe(struct i2c_client *client)
}
static const struct i2c_device_id si514_id[] = {
- { "si514", 0 },
+ { "si514" },
{ }
};
MODULE_DEVICE_TABLE(i2c, si514_id);
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4f7b74f889f1..4ed4e1a5f4f2 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -164,15 +164,17 @@ static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int si521xx_diff_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
- best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+ req->rate = (req->best_parent_rate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+
+ return 0;
}
static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static void si521xx_diff_unprepare(struct clk_hw *hw)
}
static const struct clk_ops si521xx_diff_clk_ops = {
- .round_rate = si521xx_diff_round_rate,
+ .determine_rate = si521xx_diff_determine_rate,
.set_rate = si521xx_diff_set_rate,
.recalc_rate = si521xx_diff_recalc_rate,
.prepare = si521xx_diff_prepare,
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 6e8dd7387cfd..2499b771cd83 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -21,7 +21,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define SI5341_NUM_INPUTS 4
@@ -663,8 +663,8 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
return f;
}
-static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si5341_synth_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
@@ -672,15 +672,21 @@ static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* The synthesizer accuracy is such that anything in range will work */
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MAX);
- if (rate < f)
- return f;
+ if (req->rate < f) {
+ req->rate = f;
+
+ return 0;
+ }
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MIN);
- if (rate > f)
- return f;
+ if (req->rate > f) {
+ req->rate = f;
- return rate;
+ return 0;
+ }
+
+ return 0;
}
static int si5341_synth_program(struct clk_si5341_synth *synth,
@@ -741,7 +747,7 @@ static const struct clk_ops si5341_synth_clk_ops = {
.prepare = si5341_synth_clk_prepare,
.unprepare = si5341_synth_clk_unprepare,
.recalc_rate = si5341_synth_clk_recalc_rate,
- .round_rate = si5341_synth_clk_round_rate,
+ .determine_rate = si5341_synth_clk_determine_rate,
.set_rate = si5341_synth_clk_set_rate,
};
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 4ce83c5265b8..e755db545e2e 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -655,7 +655,7 @@ static int si5351_msynth_determine_rate(struct clk_hw *hw,
unsigned long a, b, c;
int divby4;
- /* multisync6-7 can only handle freqencies < 150MHz */
+ /* multisync6-7 can only handle frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ)
rate = SI5351_MULTISYNTH67_MAX_FREQ;
@@ -1048,11 +1048,11 @@ static int si5351_clkout_determine_rate(struct clk_hw *hw,
unsigned long rate = req->rate;
unsigned char rdiv;
- /* clkout6/7 can only handle output freqencies < 150MHz */
+ /* clkout6/7 can only handle output frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ)
rate = SI5351_CLKOUT67_MAX_FREQ;
- /* clkout freqency is 8kHz - 160MHz */
+ /* clkout frequency is 8kHz - 160MHz */
if (rate > SI5351_CLKOUT_MAX_FREQ)
rate = SI5351_CLKOUT_MAX_FREQ;
if (rate < SI5351_CLKOUT_MIN_FREQ)
@@ -1175,8 +1175,8 @@ static int si5351_dt_parse(struct i2c_client *client,
{
struct device_node *child, *np = client->dev.of_node;
struct si5351_platform_data *pdata;
- struct property *prop;
- const __be32 *p;
+ u32 array[4];
+ int sz, i;
int num = 0;
u32 val;
@@ -1191,20 +1191,24 @@ static int si5351_dt_parse(struct i2c_client *client,
* property silabs,pll-source : <num src>, [<..>]
* allow to selectively set pll source
*/
- of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
+ sz = of_property_read_variable_u32_array(np, "silabs,pll-source", array, 2, 4);
+ sz = (sz == -EINVAL) ? 0 : sz; /* Missing property is OK */
+ if (sz < 0)
+ return dev_err_probe(&client->dev, sz, "invalid pll-source\n");
+ if (sz % 2)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "missing pll-source for pll %d\n", array[sz - 1]);
+
+ for (i = 0; i < sz; i += 2) {
+ num = array[i];
+ val = array[i + 1];
+
if (num >= 2) {
dev_err(&client->dev,
"invalid pll %d on pll-source prop\n", num);
return -EINVAL;
}
- p = of_prop_next_u32(prop, p, &val);
- if (!p) {
- dev_err(&client->dev,
- "missing pll-source for pll %d\n", num);
- return -EINVAL;
- }
-
switch (val) {
case 0:
pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
@@ -1232,19 +1236,24 @@ static int si5351_dt_parse(struct i2c_client *client,
pdata->pll_reset[0] = true;
pdata->pll_reset[1] = true;
- of_property_for_each_u32(np, "silabs,pll-reset-mode", prop, p, num) {
+ sz = of_property_read_variable_u32_array(np, "silabs,pll-reset-mode", array, 2, 4);
+ sz = (sz == -EINVAL) ? 0 : sz; /* Missing property is OK */
+ if (sz < 0)
+ return dev_err_probe(&client->dev, sz, "invalid pll-reset-mode\n");
+ if (sz % 2)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "missing pll-reset-mode for pll %d\n", array[sz - 1]);
+
+ for (i = 0; i < sz; i += 2) {
+ num = array[i];
+ val = array[i + 1];
+
if (num >= 2) {
dev_err(&client->dev,
"invalid pll %d on pll-reset-mode prop\n", num);
return -EINVAL;
}
- p = of_prop_next_u32(prop, p, &val);
- if (!p) {
- dev_err(&client->dev,
- "missing pll-reset-mode for pll %d\n", num);
- return -EINVAL;
- }
switch (val) {
case 0:
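The two si5351 parsing hunks replace the open-coded property cursor with of_property_read_variable_u32_array(), which copies between sz_min and sz_max u32 cells and returns the count actually read; a missing property yields -EINVAL, which the code deliberately maps to "zero pairs". A self-contained sketch of the same <index value> pair convention, using a hypothetical "vendor,pairs" property:

#include <linux/of.h>

static int parse_pairs(struct device_node *np)
{
	u32 array[4];
	int sz, i;

	sz = of_property_read_variable_u32_array(np, "vendor,pairs",
						 array, 2, ARRAY_SIZE(array));
	if (sz == -EINVAL)
		return 0;		/* property absent: nothing to do */
	if (sz < 0)
		return sz;		/* present but malformed */
	if (sz % 2)
		return -EINVAL;		/* an index arrived without a value */

	for (i = 0; i < sz; i += 2)
		pr_info("index %u -> value %u\n", array[i], array[i + 1]);

	return 0;
}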
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index c88650558f32..09c06ecec1a5 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -39,7 +39,7 @@
/* Max freq depends on speed grade */
#define SI544_MIN_FREQ 200000U
-/* Si544 Internal oscilator runs at 55.05 MHz */
+/* Si544 Internal oscillator runs at 55.05 MHz */
#define FXO 55050000U
/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
@@ -307,16 +307,16 @@ static unsigned long si544_recalc_rate(struct clk_hw *hw,
return si544_calc_rate(&settings);
}
-static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si544_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si544 *data = to_clk_si544(hw);
- if (!is_valid_frequency(data, rate))
+ if (!is_valid_frequency(data, req->rate))
return -EINVAL;
/* The accuracy is less than 1 Hz, so any rate is possible */
- return rate;
+ return 0;
}
/* Calculates the maximum "small" change, 950 * rate / 1000000 */
@@ -408,7 +408,7 @@ static const struct clk_ops si544_clk_ops = {
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
- .round_rate = si544_round_rate,
+ .determine_rate = si544_determine_rate,
.set_rate = si544_set_rate,
};
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index a549ea13be20..b0b1830dd430 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -63,7 +63,7 @@ struct clk_si570_info {
* struct clk_si570:
* @hw: Clock hw struct
* @regmap: Device's regmap
- * @div_offset: Rgister offset for dividers
+ * @div_offset: Register offset for dividers
* @info: Device info
* @fxtal: Factory xtal frequency
* @n1: Clock divider N1
@@ -181,7 +181,7 @@ static int si570_update_rfreq(struct clk_si570 *data)
}
/**
- * si570_calc_divs() - Caluclate clock dividers
+ * si570_calc_divs() - Calculate clock dividers
* @frequency: Target frequency
* @data: Driver data structure
* @out_rfreq: RFREG fractional multiplier (output)
@@ -246,34 +246,40 @@ static unsigned long si570_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si570_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int err;
u64 rfreq;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ if (div64_u64(abs(req->rate - data->frequency) * 10000LL,
data->frequency) < 35) {
- rfreq = div64_u64((data->rfreq * rate) +
- div64_u64(data->frequency, 2), data->frequency);
+ rfreq = div64_u64((data->rfreq * req->rate) +
+ div64_u64(data->frequency, 2),
+ data->frequency);
n1 = data->n1;
hs_div = data->hs_div;
} else {
- err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ err = si570_calc_divs(req->rate, data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev,
"unable to round rate\n");
+ req->rate = 0;
+
return 0;
}
}
- return rate;
+ return 0;
}
/**
@@ -368,7 +374,7 @@ static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops si570_clk_ops = {
.recalc_rate = si570_recalc_rate,
- .round_rate = si570_round_rate,
+ .determine_rate = si570_determine_rate,
.set_rate = si570_set_rate,
};
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 7cb7d501d7a6..36528a71a2e6 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
@@ -14,7 +15,7 @@
#include <dt-bindings/clock/sunplus,sp7021-clkc.h>
-/* speical div_width values for PLLTV/PLLA */
+/* special div_width values for PLLTV/PLLA */
#define DIV_TV 33
#define DIV_A 34
@@ -38,13 +39,6 @@ enum {
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
-/* HIWORD_MASK FIELD_PREP */
-#define HWM_FIELD_PREP(mask, value) \
-({ \
- u64 _m = mask; \
- (_m << 16) | FIELD_PREP(_m, value); \
-})
-
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
@@ -313,15 +307,15 @@ static int plltv_set_rate(struct sp_pll *clk)
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
- r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
- r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
- r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
- r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= FIELD_PREP_WM16(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= FIELD_PREP_WM16(MASK_NFRA, clk->p[NFRA]);
- r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+ r1 = FIELD_PREP_WM16(MASK_DIVR, clk->p[DIVR]);
- r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
- r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+ r2 = FIELD_PREP_WM16(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= FIELD_PREP_WM16(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
@@ -412,25 +406,27 @@ static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
return fbdiv;
}
-static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sp_pll *clk = to_sp_pll(hw);
long ret;
- if (rate == *prate) {
- ret = *prate; /* bypass */
+ if (req->rate == req->best_parent_rate) {
+ ret = req->best_parent_rate; /* bypass */
} else if (clk->div_width == DIV_A) {
- ret = plla_round_rate(clk, rate);
+ ret = plla_round_rate(clk, req->rate);
} else if (clk->div_width == DIV_TV) {
- ret = plltv_div(clk, rate);
+ ret = plltv_div(clk, req->rate);
if (ret < 0)
- ret = *prate;
+ ret = req->best_parent_rate;
} else {
- ret = sp_pll_calc_div(clk, rate) * clk->brate;
+ ret = sp_pll_calc_div(clk, req->rate) * clk->brate;
}
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
@@ -535,7 +531,7 @@ static const struct clk_ops sp_pll_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
- .round_rate = sp_pll_round_rate,
+ .determine_rate = sp_pll_determine_rate,
.recalc_rate = sp_pll_recalc_rate,
.set_rate = sp_pll_set_rate
};
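The sp7021 hunks swap the driver-local HWM_FIELD_PREP() for FIELD_PREP_WM16() from the newly included <linux/hw_bitfield.h>; per the removed macro, both expand to (mask << 16) | FIELD_PREP(mask, value), i.e. the upper half-word carries a write-enable copy of the field mask. A small sketch with an invented EX_FIELD mask:

#include <linux/hw_bitfield.h>
#include <linux/io.h>

#define EX_FIELD	GENMASK(14, 8)	/* made-up field, illustration only */

static void ex_set_field(void __iomem *reg, unsigned int val)
{
	/*
	 * Bits 30:24 arm the write and bits 14:8 carry the value, so the
	 * hardware updates just this field and no read-modify-write of
	 * the register is needed.
	 */
	writel(FIELD_PREP_WM16(EX_FIELD, val), reg);
}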
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
index 0fad0c1a0186..b2facc9c95d4 100644
--- a/drivers/clk/clk-sparx5.c
+++ b/drivers/clk/clk-sparx5.c
@@ -213,19 +213,21 @@ static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
return conf.freq;
}
-static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int s5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct s5_pll_conf conf;
- return s5_calc_params(rate, *parent_rate, &conf);
+ req->rate = s5_calc_params(req->rate, req->best_parent_rate, &conf);
+
+ return 0;
}
static const struct clk_ops s5_pll_ops = {
.enable = s5_pll_enable,
.disable = s5_pll_disable,
.set_rate = s5_pll_set_rate,
- .round_rate = s5_pll_round_rate,
+ .determine_rate = s5_pll_determine_rate,
.recalc_rate = s5_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 07c13ebe327d..b5d4d48432a0 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -5,6 +5,7 @@
* Inspired by clk-asm9260.c .
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -18,7 +19,7 @@
#include <linux/mfd/syscon.h>
/*
- * Include list of clocks wich are not derived from system clock (SYSCLOCK)
+ * Include list of clocks which are not derived from system clock (SYSCLOCK)
* The index of these clocks is the secondary index of DT bindings
*
*/
@@ -34,11 +35,20 @@
#define STM32F4_RCC_APB2ENR 0x44
#define STM32F4_RCC_BDCR 0x70
#define STM32F4_RCC_CSR 0x74
+#define STM32F4_RCC_SSCGR 0x80
#define STM32F4_RCC_PLLI2SCFGR 0x84
#define STM32F4_RCC_PLLSAICFGR 0x88
#define STM32F4_RCC_DCKCFGR 0x8c
#define STM32F7_RCC_DCKCFGR2 0x90
+#define STM32F4_RCC_PLLCFGR_N_MASK GENMASK(14, 6)
+
+#define STM32F4_RCC_SSCGR_SSCGEN BIT(31)
+#define STM32F4_RCC_SSCGR_SPREADSEL BIT(30)
+#define STM32F4_RCC_SSCGR_RESERVED_MASK GENMASK(29, 28)
+#define STM32F4_RCC_SSCGR_INCSTEP_MASK GENMASK(27, 13)
+#define STM32F4_RCC_SSCGR_MODPER_MASK GENMASK(12, 0)
+
#define NONE -1
#define NO_IDX NONE
#define NO_MUX NONE
@@ -364,6 +374,16 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
+enum stm32f4_pll_ssc_mod_type {
+ STM32F4_PLL_SSC_CENTER_SPREAD,
+ STM32F4_PLL_SSC_DOWN_SPREAD,
+};
+
+static const char * const stm32f4_ssc_mod_methods[] __initconst = {
+ [STM32F4_PLL_SSC_DOWN_SPREAD] = "down-spread",
+ [STM32F4_PLL_SSC_CENTER_SPREAD] = "center-spread",
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -423,8 +443,8 @@ static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_apb_mul_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
unsigned long mult = 1;
@@ -433,12 +453,14 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
mult = 2;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate / mult;
+ unsigned long best_parent = req->rate / mult;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate * mult;
+ req->rate = req->best_parent_rate * mult;
+
+ return 0;
}
static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -454,7 +476,7 @@ static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_apb_mul_factor_ops = {
- .round_rate = clk_apb_mul_round_rate,
+ .determine_rate = clk_apb_mul_determine_rate,
.set_rate = clk_apb_mul_set_rate,
.recalc_rate = clk_apb_mul_recalc_rate,
};
@@ -509,6 +531,12 @@ static const struct clk_div_table pll_divr_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
};
+struct stm32f4_pll_ssc {
+ unsigned int mod_freq;
+ unsigned int mod_depth;
+ enum stm32f4_pll_ssc_mod_type mod_type;
+};
+
struct stm32f4_pll {
spinlock_t *lock;
struct clk_gate gate;
@@ -516,6 +544,8 @@ struct stm32f4_pll {
u8 bit_rdy_idx;
u8 status;
u8 n_start;
+ bool ssc_enable;
+ struct stm32f4_pll_ssc ssc_conf;
};
#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate)
@@ -538,6 +568,7 @@ struct stm32f4_vco_data {
u8 offset;
u8 bit_idx;
u8 bit_rdy_idx;
+ bool sscg;
};
static const struct stm32f4_vco_data vco_data[] = {
@@ -632,28 +663,58 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ unsigned long val;
unsigned long n;
- n = (readl(base + pll->offset) >> 6) & 0x1ff;
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
return parent_rate * n;
}
-static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm32f4_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
- n = rate / *prate;
+ n = req->rate / req->best_parent_rate;
if (n < pll->n_start)
n = pll->n_start;
else if (n > 432)
n = 432;
- return *prate * n;
+ req->rate = req->best_parent_rate * n;
+
+ return 0;
+}
+
+static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int ndiv)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct stm32f4_pll_ssc *ssc = &pll->ssc_conf;
+ u32 modeper, incstep;
+ u32 sscgr;
+
+ sscgr = readl(base + STM32F4_RCC_SSCGR);
+ /* reserved field must be kept at reset value */
+ sscgr &= STM32F4_RCC_SSCGR_RESERVED_MASK;
+
+ modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * ssc->mod_freq);
+ incstep = DIV_ROUND_CLOSEST(((1 << 15) - 1) * ssc->mod_depth * ndiv,
+ 5 * 10000 * modeper);
+ sscgr |= STM32F4_RCC_SSCGR_SSCGEN |
+ FIELD_PREP(STM32F4_RCC_SSCGR_INCSTEP_MASK, incstep) |
+ FIELD_PREP(STM32F4_RCC_SSCGR_MODPER_MASK, modeper);
+
+ if (ssc->mod_type)
+ sscgr |= STM32F4_RCC_SSCGR_SPREADSEL;
+
+ writel(sscgr, base + STM32F4_RCC_SSCGR);
}
static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -673,9 +734,13 @@ static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
n = rate / parent_rate;
- val = readl(base + pll->offset) & ~(0x1ff << 6);
+ val = readl(base + pll->offset) & ~STM32F4_RCC_PLLCFGR_N_MASK;
+ val |= FIELD_PREP(STM32F4_RCC_PLLCFGR_N_MASK, n);
- writel(val | ((n & 0x1ff) << 6), base + pll->offset);
+ writel(val, base + pll->offset);
+
+ if (pll->ssc_enable)
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
if (pll_state)
stm32f4_pll_enable(hw);
@@ -688,7 +753,7 @@ static const struct clk_ops stm32f4_pll_gate_ops = {
.disable = stm32f4_pll_disable,
.is_enabled = stm32f4_pll_is_enabled,
.recalc_rate = stm32f4_pll_recalc,
- .round_rate = stm32f4_pll_round_rate,
+ .determine_rate = stm32f4_pll_determine_rate,
.set_rate = stm32f4_pll_set_rate,
};
@@ -782,6 +847,84 @@ static struct clk_hw *clk_register_pll_div(const char *name,
return hw;
}
+static int __init stm32f4_pll_init_ssc(struct clk_hw *hw,
+ const struct stm32f4_pll_ssc *conf)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+ int pll_state;
+ unsigned long n, val;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("%s: failed to get clock parent\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ pll->ssc_enable = true;
+ memcpy(&pll->ssc_conf, conf, sizeof(pll->ssc_conf));
+
+ pll_state = stm32f4_pll_is_enabled(hw);
+
+ if (pll_state)
+ stm32f4_pll_disable(hw);
+
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
+
+ pr_debug("%s: pll: %s, parent: %s, parent-rate: %lu, n: %lu\n",
+ __func__, clk_hw_get_name(hw), clk_hw_get_name(parent),
+ parent_rate, n);
+
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
+
+ if (pll_state)
+ stm32f4_pll_enable(hw);
+
+ return 0;
+}
+
+static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
+ struct stm32f4_pll_ssc *conf)
+{
+ int ret;
+
+ if (!conf)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "st,ssc-modfreq-hz", &conf->mod_freq);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "st,ssc-moddepth-permyriad",
+ &conf->mod_depth);
+ if (ret) {
+ pr_err("%pOF: missing st,ssc-moddepth-permyriad\n", np);
+ return ret;
+ }
+
+ ret = fwnode_property_match_property_string(of_fwnode_handle(np),
+ "st,ssc-modmethod",
+ stm32f4_ssc_mod_methods,
+ ARRAY_SIZE(stm32f4_ssc_mod_methods));
+ if (ret < 0) {
+ pr_err("%pOF: failed to get st,ssc-modmethod\n", np);
+ return ret;
+ }
+
+ conf->mod_type = ret;
+
+ pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d mod_method: %s [%d]\n",
+ np, conf->mod_freq, conf->mod_depth,
+ stm32f4_ssc_mod_methods[ret], conf->mod_type);
+
+ return 0;
+}
+
static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc,
const struct stm32f4_pll_data *data, spinlock_t *lock)
{
@@ -1689,7 +1832,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
unsigned long pllm;
- struct clk_hw *pll_src_hw;
+ struct clk_hw *pll_src_hw, *pll_vco_hw;
+ struct stm32f4_pll_ssc ssc_conf;
base = of_iomap(np, 0);
if (!base) {
@@ -1748,8 +1892,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
0, 1, pllm);
- stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
- &stm32f4_clk_lock);
+ pll_vco_hw = stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
+ &stm32f4_clk_lock);
clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[1], &stm32f4_clk_lock);
@@ -1894,6 +2038,9 @@ static void __init stm32f4_rcc_init(struct device_node *np)
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+ if (!stm32f4_pll_ssc_parse_dt(np, &ssc_conf))
+ stm32f4_pll_init_ssc(pll_vco_hw, &ssc_conf);
+
return;
fail:
kfree(clks);
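stm32f4_pll_set_ssc() above encodes the reference-manual relations MODPER = round(f_in / (4 * f_mod)) and INCSTEP = round((2^15 - 1) * depth * N / (5 * 10^4 * MODPER)), with the depth taken from DT in permyriad. Plugging in illustrative numbers (not from the patch): a 1 MHz PLL input, st,ssc-modfreq-hz = 10000, st,ssc-moddepth-permyriad = 200 (i.e. 2 %) and PLLN = 336:

	modeper = DIV_ROUND_CLOSEST(1000000, 4 * 10000);	/* = 25  */
	incstep = DIV_ROUND_CLOSEST(32767 * 200 * 336,
				    5 * 10000 * 25);		/* = 176 */

SSCGR is then written with SSCGEN set, INCSTEP = 176 and MODPER = 25, SPREADSEL is added only for down-spread modulation, and the reserved bits 29:28 keep their reset value.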
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index 38f44b5b9b1b..9511248c6bc9 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -146,12 +146,14 @@ static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
return best_idx;
}
-static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tps68470_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int idx = tps68470_clk_cfg_lookup(rate);
+ unsigned int idx = tps68470_clk_cfg_lookup(req->rate);
+
+ req->rate = clk_freqs[idx].freq;
- return clk_freqs[idx].freq;
+ return 0;
}
static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -186,7 +188,7 @@ static const struct clk_ops tps68470_clk_ops = {
.prepare = tps68470_clk_prepare,
.unprepare = tps68470_clk_unprepare,
.recalc_rate = tps68470_clk_recalc_rate,
- .round_rate = tps68470_clk_round_rate,
+ .determine_rate = tps68470_clk_determine_rate,
.set_rate = tps68470_clk_set_rate,
};
diff --git a/drivers/clk/clk-twl.c b/drivers/clk/clk-twl.c
index eab9d3c8ed8a..20bc3bf8fd62 100644
--- a/drivers/clk/clk-twl.c
+++ b/drivers/clk/clk-twl.c
@@ -11,13 +11,29 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define VREG_STATE 2
+#define VREG_STATE 2
+#define VREG_GRP 0
#define TWL6030_CFG_STATE_OFF 0x00
#define TWL6030_CFG_STATE_ON 0x01
#define TWL6030_CFG_STATE_MASK 0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT 5
+#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+ TWL6030_CFG_STATE_APP_SHIFT)
+#define P1_GRP BIT(0) /* processor power group */
+#define P2_GRP BIT(1)
+#define P3_GRP BIT(2)
+#define ALL_GRP (P1_GRP | P2_GRP | P3_GRP)
+
+enum twl_type {
+ TWL_TYPE_6030,
+ TWL_TYPE_6032,
+};
struct twl_clock_info {
struct device *dev;
+ enum twl_type type;
u8 base;
struct clk_hw hw;
};
@@ -56,14 +72,21 @@ static unsigned long twl_clks_recalc_rate(struct clk_hw *hw,
static int twl6032_clks_prepare(struct clk_hw *hw)
{
struct twl_clock_info *cinfo = to_twl_clks_info(hw);
- int ret;
- ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- TWL6030_CFG_STATE_ON);
- if (ret < 0)
- dev_err(cinfo->dev, "clk prepare failed\n");
+ if (cinfo->type == TWL_TYPE_6030) {
+ int grp;
+
+ grp = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
- return ret;
+ return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ grp << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_ON);
+ }
+
+ return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ TWL6030_CFG_STATE_ON);
}
static void twl6032_clks_unprepare(struct clk_hw *hw)
@@ -71,32 +94,21 @@ static void twl6032_clks_unprepare(struct clk_hw *hw)
struct twl_clock_info *cinfo = to_twl_clks_info(hw);
int ret;
- ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- TWL6030_CFG_STATE_OFF);
+ if (cinfo->type == TWL_TYPE_6030)
+ ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ ALL_GRP << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_OFF);
+ else
+ ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ TWL6030_CFG_STATE_OFF);
+
if (ret < 0)
dev_err(cinfo->dev, "clk unprepare failed\n");
}
-static int twl6032_clks_is_prepared(struct clk_hw *hw)
-{
- struct twl_clock_info *cinfo = to_twl_clks_info(hw);
- int val;
-
- val = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- if (val < 0) {
- dev_err(cinfo->dev, "clk read failed\n");
- return val;
- }
-
- val &= TWL6030_CFG_STATE_MASK;
-
- return val == TWL6030_CFG_STATE_ON;
-}
-
static const struct clk_ops twl6032_clks_ops = {
.prepare = twl6032_clks_prepare,
.unprepare = twl6032_clks_unprepare,
- .is_prepared = twl6032_clks_is_prepared,
.recalc_rate = twl_clks_recalc_rate,
};
@@ -155,6 +167,7 @@ static int twl_clks_probe(struct platform_device *pdev)
for (i = 0; i < count; i++) {
cinfo[i].base = hw_data[i].base;
cinfo[i].dev = &pdev->dev;
+ cinfo[i].type = platform_get_device_id(pdev)->driver_data;
cinfo[i].hw.init = &hw_data[i].init;
ret = devm_clk_hw_register(&pdev->dev, &cinfo[i].hw);
if (ret) {
@@ -176,7 +189,11 @@ static int twl_clks_probe(struct platform_device *pdev)
static const struct platform_device_id twl_clks_id[] = {
{
+ .name = "twl6030-clk",
+ .driver_data = TWL_TYPE_6030,
+ }, {
.name = "twl6032-clk",
+ .driver_data = TWL_TYPE_6032,
}, {
/* sentinel */
}
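For TWL6030 the prepare/unprepare paths now manage the resource-group bits alongside the state. Reading the layout out of the defines above (a descriptive sketch, not new driver code):

/*
 * VREG_STATE write value on TWL6030:
 *
 *   bits 7..5  group select (P1/P2/P3 processor groups)
 *   bits 3..2  state as seen by the application (TWL6030_CFG_STATE_APP)
 *   bits 1..0  requested state: 0x00 = OFF, 0x01 = ON
 *
 * prepare re-reads VREG_GRP and writes (grp << 5) | ON so enabling the
 * clock does not clobber the group assignment; unprepare writes
 * (ALL_GRP << 5) | OFF to force the clock off for every group.
 */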
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 76d7ea1964c3..1849863dbd67 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -78,9 +78,6 @@
#define VC3_PLL1_VCO_MIN 300000000UL
#define VC3_PLL1_VCO_MAX 600000000UL
-#define VC3_PLL2_VCO_MIN 400000000UL
-#define VC3_PLL2_VCO_MAX 1200000000UL
-
#define VC3_PLL3_VCO_MIN 300000000UL
#define VC3_PLL3_VCO_MAX 800000000UL
@@ -147,9 +144,13 @@ struct vc3_pfd_data {
u8 mdiv2_bitmsk;
};
+struct vc3_vco {
+ unsigned long min;
+ unsigned long max;
+};
+
struct vc3_pll_data {
- unsigned long vco_min;
- unsigned long vco_max;
+ struct vc3_vco vco;
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
@@ -166,12 +167,17 @@ struct vc3_div_data {
struct vc3_hw_data {
struct clk_hw hw;
struct regmap *regmap;
- const void *data;
+ void *data;
u32 div_int;
u32 div_frc;
};
+struct vc3_hw_cfg {
+ struct vc3_vco pll2_vco;
+ u32 se2_clk_sel_msk;
+};
+
static const struct clk_div_table div1_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 4, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 6, },
@@ -283,22 +289,25 @@ static unsigned long vc3_pfd_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) {
if (idiv > 63)
return -EINVAL;
@@ -307,7 +316,9 @@ static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -348,7 +359,7 @@ static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pfd_ops = {
.recalc_rate = vc3_pfd_recalc_rate,
- .round_rate = vc3_pfd_round_rate,
+ .determine_rate = vc3_pfd_determine_rate,
.set_rate = vc3_pfd_set_rate,
};
@@ -379,36 +390,38 @@ static unsigned long vc3_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco_min)
- rate = pll->vco_min;
- if (rate > pll->vco_max)
- rate = pll->vco_max;
+ if (req->rate < pll->vco.min)
+ req->rate = pll->vco.min;
+ if (req->rate > pll->vco.max)
+ req->rate = pll->vco.max;
- vc3->div_int = rate / *parent_rate;
+ vc3->div_int = req->rate / req->best_parent_rate;
if (pll->num == VC3_PLL2) {
if (vc3->div_int > 0x7ff)
- rate = *parent_rate * 0x7ff;
+ req->rate = req->best_parent_rate * 0x7ff;
/* Determine best fractional part, which is 16 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(16) - 1;
- vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
- rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
+ vc3->div_frc = min_t(u64,
+ div64_ul(div_frc, req->best_parent_rate),
+ U16_MAX);
+ req->rate = (req->best_parent_rate *
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
- rate = *parent_rate * vc3->div_int;
+ req->rate = req->best_parent_rate * vc3->div_int;
}
- return rate;
+ return 0;
}
static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -435,7 +448,7 @@ static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pll_ops = {
.recalc_rate = vc3_pll_recalc_rate,
- .round_rate = vc3_pll_round_rate,
+ .determine_rate = vc3_pll_determine_rate,
.set_rate = vc3_pll_set_rate,
};
@@ -492,8 +505,8 @@ static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
div_data->flags, div_data->width);
}
-static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
@@ -505,11 +518,16 @@ static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv >>= div_data->shift;
bestdiv &= VC3_DIV_MASK(div_data->width);
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
- return DIV_ROUND_UP(*parent_rate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, parent_rate, div_data->table,
- div_data->width, div_data->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div_data->table,
+ div_data->width, div_data->flags);
+
+ return 0;
}
static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -528,7 +546,7 @@ static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_div_ops = {
.recalc_rate = vc3_div_recalc_rate,
- .round_rate = vc3_div_round_rate,
+ .determine_rate = vc3_div_determine_rate,
.set_rate = vc3_div_set_rate,
};
@@ -680,8 +698,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL1,
.int_div_msb_offs = VC3_PLL1_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL1_VCO_N_DIVIDER,
- .vco_min = VC3_PLL1_VCO_MIN,
- .vco_max = VC3_PLL1_VCO_MAX
+ .vco = {
+ .min = VC3_PLL1_VCO_MIN,
+ .max = VC3_PLL1_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll1",
@@ -698,8 +718,6 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL2,
.int_div_msb_offs = VC3_PLL2_FB_INT_DIV_MSB,
.int_div_lsb_offs = VC3_PLL2_FB_INT_DIV_LSB,
- .vco_min = VC3_PLL2_VCO_MIN,
- .vco_max = VC3_PLL2_VCO_MAX
},
.hw.init = &(struct clk_init_data) {
.name = "pll2",
@@ -716,8 +734,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL3,
.int_div_msb_offs = VC3_PLL3_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL3_N_DIVIDER,
- .vco_min = VC3_PLL3_VCO_MIN,
- .vco_max = VC3_PLL3_VCO_MAX
+ .vco = {
+ .min = VC3_PLL3_VCO_MIN,
+ .max = VC3_PLL3_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll3",
@@ -901,7 +921,6 @@ static struct vc3_hw_data clk_mux[] = {
[VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data) {
.name = "se2_mux",
@@ -982,6 +1001,7 @@ static int vc3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 settings[NUM_CONFIG_REGISTERS];
+ const struct vc3_hw_cfg *data;
struct regmap *regmap;
const char *name;
int ret, i;
@@ -1029,9 +1049,16 @@ static int vc3_probe(struct i2c_client *client)
clk_pfd[i].hw.init->name);
}
+ data = i2c_get_match_data(client);
+
/* Register pll's */
for (i = 0; i < ARRAY_SIZE(clk_pll); i++) {
clk_pll[i].regmap = regmap;
+ if (i == VC3_PLL2) {
+ struct vc3_pll_data *pll_data = clk_pll[i].data;
+
+ pll_data->vco = data->pll2_vco;
+ }
ret = devm_clk_hw_register(dev, &clk_pll[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1059,6 +1086,11 @@ static int vc3_probe(struct i2c_client *client)
/* Register clk muxes */
for (i = 0; i < ARRAY_SIZE(clk_mux); i++) {
clk_mux[i].regmap = regmap;
+ if (i == VC3_SE2_MUX) {
+ struct vc3_clk_data *clk_data = clk_mux[i].data;
+
+ clk_data->bitmsk = data->se2_clk_sel_msk;
+ }
ret = devm_clk_hw_register(dev, &clk_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1108,8 +1140,19 @@ static int vc3_probe(struct i2c_client *client)
return ret;
}
+static const struct vc3_hw_cfg vc3_5p = {
+ .pll2_vco = { .min = 400000000UL, .max = 1200000000UL },
+ .se2_clk_sel_msk = BIT(6),
+};
+
+static const struct vc3_hw_cfg vc3_5l = {
+ .pll2_vco = { .min = 30000000UL, .max = 130000000UL },
+ .se2_clk_sel_msk = BIT(0),
+};
+
static const struct of_device_id dev_ids[] = {
- { .compatible = "renesas,5p35023" },
+ { .compatible = "renesas,5p35023", .data = &vc3_5p },
+ { .compatible = "renesas,5l35023", .data = &vc3_5l },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dev_ids);
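Variant data now reaches vc3_probe() through i2c_get_match_data(), which resolves the OF match entry's .data (or, failing that, ACPI match data or the legacy i2c_device_id .driver_data), so one probe function serves both the 5P35023 and 5L35023 parts. A hypothetical sketch of the same lookup:

static int ex_probe(struct i2c_client *client)
{
	const struct vc3_hw_cfg *cfg = i2c_get_match_data(client);

	if (!cfg)
		return -ENODEV;	/* matched without per-variant data */

	dev_info(&client->dev, "PLL2 VCO %lu..%lu Hz\n",
		 cfg->pll2_vco.min, cfg->pll2_vco.max);

	return 0;
}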
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 6d31cd54d7cf..57228e88e81d 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -136,7 +136,7 @@
#define VC5_MAX_FOD_NUM 4
/* flags to describe chip features */
-/* chip has built-in oscilator */
+/* chip has built-in oscillator */
#define VC5_HAS_INTERNAL_XTAL BIT(0)
 /* chip has PFD frequency doubler */
#define VC5_HAS_PFD_FREQ_DBL BIT(1)
@@ -304,11 +304,11 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_dbl_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
- return rate;
+ if ((req->best_parent_rate == req->rate) || ((req->best_parent_rate * 2) == req->rate))
+ return 0;
else
return -EINVAL;
}
@@ -332,7 +332,7 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_dbl_ops = {
.recalc_rate = vc5_dbl_recalc_rate,
- .round_rate = vc5_dbl_round_rate,
+ .determine_rate = vc5_dbl_determine_rate,
.set_rate = vc5_dbl_set_rate,
};
@@ -363,24 +363,29 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
-static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (idiv > 127)
return -EINVAL;
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -420,7 +425,7 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pfd_ops = {
.recalc_rate = vc5_pfd_recalc_rate,
- .round_rate = vc5_pfd_round_rate,
+ .determine_rate = vc5_pfd_determine_rate,
.set_rate = vc5_pfd_set_rate,
};
@@ -444,30 +449,32 @@ static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw,
return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
}
-static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
- rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
+ req->rate = clamp(req->rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
- div_int = rate / *parent_rate;
+ div_int = req->rate / req->best_parent_rate;
if (div_int > 0xfff)
- rate = *parent_rate * 0xfff;
+ req->rate = req->best_parent_rate * 0xfff;
/* Determine best fractional part, which is 24 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(24) - 1;
- do_div(div_frc, *parent_rate);
+ do_div(div_frc, req->best_parent_rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24);
+ req->rate = (req->best_parent_rate * div_int) + ((req->best_parent_rate * div_frc) >> 24);
+
+ return 0;
}
static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -488,7 +495,7 @@ static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pll_ops = {
.recalc_rate = vc5_pll_recalc_rate,
- .round_rate = vc5_pll_round_rate,
+ .determine_rate = vc5_pll_determine_rate,
.set_rate = vc5_pll_set_rate,
};
@@ -520,17 +527,17 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
-static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
/* VCO frequency is divided by two before entering FOD */
- u32 f_in = *parent_rate / 2;
+ u32 f_in = req->best_parent_rate / 2;
u32 div_int;
u64 div_frc;
/* Determine integer part, which is 12 bit wide */
- div_int = f_in / rate;
+ div_int = f_in / req->rate;
/*
* WARNING: The clock chip does not output signal if the integer part
* of the divider is 0xfff and fractional part is non-zero.
@@ -538,18 +545,20 @@ static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
*/
if (div_int > 0xffe) {
div_int = 0xffe;
- rate = f_in / div_int;
+ req->rate = f_in / div_int;
}
/* Determine best fractional part, which is 30 bit wide */
- div_frc = f_in % rate;
+ div_frc = f_in % req->rate;
div_frc <<= 24;
- do_div(div_frc, rate);
+ do_div(div_frc, req->rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+ req->rate = div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+
+ return 0;
}
static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -589,7 +598,7 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_fod_ops = {
.recalc_rate = vc5_fod_recalc_rate,
- .round_rate = vc5_fod_round_rate,
+ .determine_rate = vc5_fod_determine_rate,
.set_rate = vc5_fod_set_rate,
};
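vc5_pll_determine_rate() models the feedback divider as div_int + div_frc / 2^24, computing both parts from the request and then writing back the rate the hardware will actually produce. A worked example with illustrative numbers (25 MHz parent, 98.304 MHz request):

/*
 *   div_int = 98304000 / 25000000                     = 3
 *   div_frc = (98304000 % 25000000) * (2^24 - 1)
 *               / 25000000                            = 15639048
 *   rate    = 25000000 * 3
 *               + (25000000 * 15639048) >> 24         = 98303997
 *
 * i.e. the request is met to within a few Hz, the resolution of the
 * 24-bit fractional part at this parent rate.
 */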
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
index f323263e32c3..adcc603e3259 100644
--- a/drivers/clk/clk-versaclock7.c
+++ b/drivers/clk/clk-versaclock7.c
@@ -900,17 +900,18 @@ static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent
return fod_rate;
}
-static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
unsigned long fod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_fod_divider(rate, *parent_rate,
+ vc7_calc_fod_divider(req->rate, req->best_parent_rate,
&fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
- fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod_rate = vc7_calc_fod_2nd_stage_rate(req->best_parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
@@ -918,7 +919,9 @@ static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned l
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
- return fod_rate;
+ req->rate = fod_rate;
+
+ return 0;
}
static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -952,7 +955,7 @@ static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_fod_ops = {
.recalc_rate = vc7_fod_recalc_rate,
- .round_rate = vc7_fod_round_rate,
+ .determine_rate = vc7_fod_determine_rate,
.set_rate = vc7_fod_set_rate,
};
@@ -978,21 +981,24 @@ static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent
return iod_rate;
}
-static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_iod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
unsigned long iod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
- iod_rate = div64_u64(*parent_rate, iod->iod_int);
+ vc7_calc_iod_divider(req->rate, req->best_parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(req->best_parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
- return iod_rate;
+ req->rate = iod_rate;
+
+ return 0;
}
static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -1023,7 +1029,7 @@ static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_iod_ops = {
.recalc_rate = vc7_iod_recalc_rate,
- .round_rate = vc7_iod_round_rate,
+ .determine_rate = vc7_iod_determine_rate,
.set_rate = vc7_iod_set_rate,
};
@@ -1257,7 +1263,7 @@ static const struct vc7_chip_info vc7_rc21008a_info = {
.num_outputs = 8,
};
-static struct regmap_range_cfg vc7_range_cfg[] = {
+static const struct regmap_range_cfg vc7_range_cfg[] = {
{
.range_min = 0,
.range_max = VC7_MAX_REG,
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 2a74a713ad59..eae5b3fbfb82 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -128,30 +128,31 @@ static unsigned long vt8500_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vt8500_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
- if (rate == 0)
+ if (req->rate == 0)
return 0;
- divisor = *prate / rate;
+ divisor = req->best_parent_rate / req->rate;
/* If prate / rate would be decimal, incr the divisor */
- if (rate * divisor < *prate)
+ if (req->rate * divisor < req->best_parent_rate)
divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
* when >31 to use the fixed predivisor
*/
- if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
+ if ((cdev->div_mask == 0x3F) && (divisor > 31))
divisor = 64 * ((divisor / 64) + 1);
- }
- return *prate / divisor;
+ req->rate = req->best_parent_rate / divisor;
+
+ return 0;
}
static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -202,7 +203,7 @@ static const struct clk_ops vt8500_gated_clk_ops = {
};
static const struct clk_ops vt8500_divisor_clk_ops = {
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -211,7 +212,7 @@ static const struct clk_ops vt8500_gated_divisor_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -594,8 +595,8 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vtwm_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
@@ -604,33 +605,43 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
switch (pll->type) {
case PLL_TYPE_VT8500:
- ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1);
if (!ret)
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ round_rate = VT8500_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1);
break;
case PLL_TYPE_WM8650:
- ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8650_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8650_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ ret = wm8750_find_pll_bits(req->rate, req->best_parent_rate,
+ &filter, &mul, &div1, &div2);
if (!ret)
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8750_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8850_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8850_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
if (ret)
- return ret;
+ req->rate = ret;
+ else
+ req->rate = round_rate;
- return round_rate;
+ return 0;
}
static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
@@ -665,7 +676,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops vtwm_pll_ops = {
- .round_rate = vtwm_pll_round_rate,
+ .determine_rate = vtwm_pll_determine_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 34e9d4d541e2..263e927138c2 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -133,18 +133,20 @@ static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *unused)
+static int wm831x_fll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int best = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
- if (abs(wm831x_fll_auto_rates[i] - rate) <
- abs(wm831x_fll_auto_rates[best] - rate))
+ if (abs(wm831x_fll_auto_rates[i] - req->rate) <
+ abs(wm831x_fll_auto_rates[best] - req->rate))
best = i;
- return wm831x_fll_auto_rates[best];
+ req->rate = wm831x_fll_auto_rates[best];
+
+ return 0;
}
static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -214,7 +216,7 @@ static const struct clk_ops wm831x_fll_ops = {
.is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
- .round_rate = wm831x_fll_round_rate,
+ .determine_rate = wm831x_fll_determine_rate,
.recalc_rate = wm831x_fll_recalc_rate,
.set_rate = wm831x_fll_set_rate,
.get_parent = wm831x_fll_get_parent,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 0c3d0cee98c8..92e39f3237c2 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
@@ -270,23 +271,28 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int xgene_clk_pmd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
u64 ret, scale;
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
+ if (!req->rate || req->rate >= req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
/* freq = parent_rate * scaler / denom */
- ret = rate * fd->denom;
- scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
+ ret = req->rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate);
- ret = (u64)*parent_rate * scale;
+ ret = (u64)req->best_parent_rate * scale;
do_div(ret, fd->denom);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
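
[Editor's note] The scaler here is computed by rounding up, so the result is the smallest parent_rate * scale / denom step at or above the request. A worked example with hypothetical values, not taken from any SoC:

/*
 * Assume req->best_parent_rate = 100000000 (100 MHz), fd->denom = 512,
 * and req->rate = 60000000 (60 MHz):
 *
 *   ret   = 60000000 * 512                            = 30720000000
 *   scale = DIV_ROUND_UP_ULL(30720000000, 100000000)  = 308
 *   rate  = 100000000 * 308 / 512                     = 60156250 Hz
 */
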
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -332,7 +338,7 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops xgene_clk_pmd_ops = {
.recalc_rate = xgene_clk_pmd_recalc_rate,
- .round_rate = xgene_clk_pmd_round_rate,
+ .determine_rate = xgene_clk_pmd_determine_rate,
.set_rate = xgene_clk_pmd_set_rate,
};
@@ -520,8 +526,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
- data & pclk->param.reg_clk_mask ? "enabled" :
- "disabled");
+ str_enabled_disabled(data & pclk->param.reg_clk_mask));
} else {
return 1;
}
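
[Editor's note] str_enabled_disabled() comes from <linux/string_choices.h> and replaces the open-coded ternary, deduplicating the "enabled"/"disabled" string literals across the tree. A sketch of the equivalence (sibling helpers such as str_on_off() and str_yes_no() follow the same shape):

#include <linux/string_choices.h>

/* These two calls print the same thing: */
pr_debug("clock is %s\n", enabled ? "enabled" : "disabled");
pr_debug("clock is %s\n", str_enabled_disabled(enabled));
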
@@ -593,23 +598,25 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return parent_rate / divider_save;
}
-static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int xgene_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 divider;
if (pclk->param.divider_reg) {
/* Let's compute the divider */
- if (rate > parent_rate)
- rate = parent_rate;
- divider = parent_rate / rate; /* Rounded down */
+ if (req->rate > parent_rate)
+ req->rate = parent_rate;
+ divider = parent_rate / req->rate; /* Rounded down */
} else {
divider = 1;
}
- return parent_rate / divider;
+ req->rate = parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops xgene_clk_ops = {
@@ -618,7 +625,7 @@ static const struct clk_ops xgene_clk_ops = {
.is_enabled = xgene_clk_is_enabled,
.recalc_rate = xgene_clk_recalc_rate,
.set_rate = xgene_clk_set_rate,
- .round_rate = xgene_clk_round_rate,
+ .determine_rate = xgene_clk_determine_rate,
};
static struct clk *xgene_register_clk(struct device *dev,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 8cca52be993f..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,6 +36,9 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -87,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -365,6 +372,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+ return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+ return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
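
[Editor's note] These two new accessors let provider code retrieve the struct device or OF node a clk was registered with, instead of caching the pointer in a driver-private struct. A hedged usage sketch; the property name is made up for illustration:

#include <linux/clk-provider.h>
#include <linux/of.h>

static int example_read_ratio(struct clk_hw *hw, u32 *ratio)
{
	struct device_node *np = clk_hw_get_of_node(hw);

	/* NULL when the clk was registered without an OF node. */
	if (!np)
		return -ENODEV;

	return of_property_read_u32(np, "example,ratio", ratio);
}
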
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
@@ -383,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
-{
- struct clk_core *child;
- struct clk_core *ret;
-
- if (!strcmp(core->name, name))
- return core;
-
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
if (!name)
return NULL;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
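
[Editor's note] This replaces a recursive walk of the root and orphan trees with a hashtable keyed on the clk name, turning clk_core_lookup() from O(number of clks) into O(1) on average. The same <linux/hashtable.h> pattern in isolation, with illustrative names:

#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/stringhash.h>

struct item {
	const char *name;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(items, 9);	/* 2^9 = 512 buckets */

static void item_add(struct item *it)
{
	hash_add(items, &it->node,
		 full_name_hash(NULL, it->name, strlen(it->name)));
}

static struct item *item_find(const char *name)
{
	struct item *it;
	u32 hash = full_name_hash(NULL, name, strlen(name));

	/* Buckets can collide, so the name must still be compared. */
	hash_for_each_possible(items, it, node, hash)
		if (!strcmp(it->name, name))
			return it;

	return NULL;
}
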
@@ -608,12 +602,6 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
-bool clk_hw_rate_is_protected(const struct clk_hw *hw)
-{
- return clk_core_rate_is_protected(hw->core);
-}
-EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
-
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
@@ -2289,7 +2277,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
- long ret;
+ int ret;
/* sanity */
if (IS_ERR_OR_NULL(core))
@@ -4007,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -4083,6 +4073,7 @@ out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
+ hash_del(&core->hashtable_node);
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
@@ -4403,6 +4394,13 @@ fail_ops:
fail_name:
kref_put(&core->ref, __clk_release);
fail_out:
+ if (dev) {
+ dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
+ init->name, hw);
+ } else {
+ pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
+ np, ERR_PTR(ret), init->name, hw);
+ }
return ERR_PTR(ret);
}
@@ -4597,6 +4595,7 @@ void clk_unregister(struct clk *clk)
clk_core_evict_parent_cache(clk->core);
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
@@ -4762,7 +4761,7 @@ void __clk_put(struct clk *clk)
clk->exclusive_count = 0;
}
- hlist_del(&clk->clks_node);
+ clk_core_unlink_consumer(clk);
/* If we had any boundaries on that clock, let's drop them. */
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
@@ -5232,7 +5231,7 @@ static int of_parse_clkspec(const struct device_node *np, int index,
* clocks.
*/
np = np->parent;
- if (np && !of_get_property(np, "clock-ranges", NULL))
+ if (np && !of_property_present(np, "clock-ranges"))
break;
index = 0;
}
@@ -5264,6 +5263,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
+ /* Check if node in clkspec is in disabled/fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
@@ -5364,9 +5367,8 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
struct of_phandle_args clkspec;
- struct property *prop;
const char *clk_name;
- const __be32 *vp;
+ bool found = false;
u32 pv;
int rc;
int count;
@@ -5383,16 +5385,19 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
/* if there is an indices property, use it to transfer the index
* specified into an array offset for the clock-output-names property.
*/
- of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+ of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
if (index == pv) {
index = count;
+ found = true;
break;
}
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (prop && !vp)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
@@ -5504,14 +5509,12 @@ static int parent_ready(struct device_node *np)
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
- struct property *prop;
- const __be32 *cur;
uint32_t idx;
if (!np || !flags)
return -EINVAL;
- of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+ of_property_for_each_u32(np, "clock-critical", idx)
if (index == idx)
*flags |= CLK_IS_CRITICAL;
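
[Editor's note] Both hunks above pick up the simplified of_property_for_each_u32() from recent kernels, which keeps its iteration state internal and drops the struct property and const __be32 cursor arguments. Callers that relied on the cursor to detect an early exit, as of_clk_get_parent_name() did, need an explicit found flag instead. A before/after sketch, where use() is a stand-in:

u32 val;

/* Old form: caller-visible cursor state. */
struct property *prop;
const __be32 *cur;
of_property_for_each_u32(np, "clock-indices", prop, cur, val)
	use(val);

/* New form: only the value variable remains. */
of_property_for_each_u32(np, "clock-indices", val)
	use(val);
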
diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c
new file mode 100644
index 000000000000..68a28e70bb61
--- /dev/null
+++ b/drivers/clk/clk_kunit_helpers.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit helpers for clk providers and consumers
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <kunit/clk.h>
+#include <kunit/resource.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_disable_unprepare_wrapper,
+ clk_disable_unprepare, struct clk *);
+/**
+ * clk_prepare_enable_kunit() - Test managed clk_prepare_enable()
+ * @test: The test context
+ * @clk: clk to prepare and enable
+ *
+ * Return: 0 on success, or negative errno on failure.
+ */
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_disable_unprepare_wrapper,
+ clk);
+}
+EXPORT_SYMBOL_GPL(clk_prepare_enable_kunit);
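
[Editor's note] Every helper in this new file follows the same shape: perform the operation, then queue its inverse with kunit_add_action_or_reset() so cleanup runs automatically when the test exits, failure paths included. In a test this removes all manual unwinding; a sketch with an illustrative clock name:

#include <kunit/clk.h>
#include <kunit/test.h>

static void example_clk_test(struct kunit *test)
{
	struct clk *clk;

	/* clk_put() is queued automatically. */
	clk = clk_get_kunit(test, NULL, "example-clk");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);

	/* clk_disable_unprepare() is queued automatically. */
	KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
}
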
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_put_wrapper, clk_put, struct clk *);
+
+static struct clk *__clk_get_kunit(struct kunit *test, struct clk *clk)
+{
+ int ret;
+
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = kunit_add_action_or_reset(test, clk_put_wrapper, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
+/**
+ * clk_get_kunit() - Test managed clk_get()
+ * @test: The test context
+ * @dev: device for clock "consumer"
+ * @con_id: clock consumer ID
+ *
+ * Just like clk_get(), except the clk is managed by the test case and is
+ * automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(clk_get_kunit);
+
+/**
+ * of_clk_get_kunit() - Test managed of_clk_get()
+ * @test: The test context
+ * @np: device_node for clock "consumer"
+ * @index: index in 'clocks' property of @np
+ *
+ * Just like of_clk_get(), except the clk is managed by the test case and is
+ * automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(np, index);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(of_clk_get_kunit);
+
+/**
+ * clk_hw_get_clk_kunit() - Test managed clk_hw_get_clk()
+ * @test: The test context
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Just like clk_hw_get_clk(), except the clk is managed by the test case and
+ * is automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_hw_get_clk(hw, con_id);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk_kunit);
+
+/**
+ * clk_hw_get_clk_prepared_enabled_kunit() - Test managed clk_hw_get_clk() + clk_prepare_enable()
+ * @test: The test context
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Just like
+ *
+ * .. code-block:: c
+ *
+ * struct clk *clk = clk_hw_get_clk(...);
+ * clk_prepare_enable(clk);
+ *
+ * except the clk is managed by the test case and is automatically disabled and
+ * unprepared with clk_disable_unprepare() and put with clk_put() after the
+ * test case concludes.
+ *
+ * Return: new clk consumer that is prepared and enabled or ERR_PTR on failure.
+ */
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = clk_hw_get_clk_kunit(test, hw, con_id);
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable_kunit(test, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk_prepared_enabled_kunit);
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_hw_unregister_wrapper,
+ clk_hw_unregister, struct clk_hw *);
+
+/**
+ * clk_hw_register_kunit() - Test managed clk_hw_register()
+ * @test: The test context
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Just like clk_hw_register(), except the clk registration is managed by the
+ * test case and is automatically unregistered after the test case concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw)
+{
+ int ret;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_kunit);
+
+/**
+ * of_clk_hw_register_kunit() - Test managed of_clk_hw_register()
+ * @test: The test context
+ * @node: device_node of device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Just like of_clk_hw_register(), except the clk registration is managed by
+ * the test case and is automatically unregistered after the test case
+ * concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struct clk_hw *hw)
+{
+ int ret;
+
+ ret = of_clk_hw_register(node, hw);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit);
+
+KUNIT_DEFINE_ACTION_WRAPPER(of_clk_del_provider_wrapper,
+ of_clk_del_provider, struct device_node *);
+
+/**
+ * of_clk_add_hw_provider_kunit() - Test managed of_clk_add_hw_provider()
+ * @test: The test context
+ * @np: Device node pointer associated with clock provider
+ * @get: Callback for decoding clk_hw
+ * @data: Context pointer for @get callback.
+ *
+ * Just like of_clk_add_hw_provider(), except the clk_hw provider is managed by
+ * the test case and is automatically unregistered after the test case
+ * concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data),
+ void *data)
+{
+ int ret;
+
+ ret = of_clk_add_hw_provider(np, get, data);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, of_clk_del_provider_wrapper, np);
+}
+EXPORT_SYMBOL_GPL(of_clk_add_hw_provider_kunit);
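
[Editor's note] The provider-side helpers compose the same way: a test can register a hw clk on a DT node and expose it as a provider in two asserts, with unregistration and provider removal handled at test teardown. A sketch assuming np and hw were already prepared:

KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, hw));
KUNIT_ASSERT_EQ(test, 0,
		of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get, hw));
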
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers");
diff --git a/drivers/clk/clk_parent_data_test.h b/drivers/clk/clk_parent_data_test.h
new file mode 100644
index 000000000000..eedd53ae910d
--- /dev/null
+++ b/drivers/clk/clk_parent_data_test.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_PARENT_DATA_TEST_H
+#define _CLK_PARENT_DATA_TEST_H
+
+#define CLK_PARENT_DATA_1MHZ_NAME "1mhz_fixed_legacy"
+#define CLK_PARENT_DATA_PARENT1 "parent_fwname"
+#define CLK_PARENT_DATA_PARENT2 "50"
+#define CLK_PARENT_DATA_50MHZ_NAME "50_clk"
+
+#endif
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index 39e2b5ff4f51..a268d7b5d4cb 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -1,15 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Kunit test for clk rate management
+ * Kunit tests for clk framework
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* Needed for clk_hw_get_clk() */
#include "clk.h"
+#include <kunit/clk.h>
+#include <kunit/of.h>
+#include <kunit/platform_device.h>
#include <kunit/test.h>
+#include "kunit_clk_assigned_rates.h"
+#include "clk_parent_data_test.h"
+
static const struct clk_ops empty_clk_ops = { };
#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
@@ -283,7 +292,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
}
/*
- * Test that clk_round_rate and clk_set_rate are consitent and will
+ * Test that clk_round_rate and clk_set_rate are consistent and will
* return the same frequency.
*/
static void clk_test_round_set_get_rate(struct kunit *test)
@@ -466,7 +475,7 @@ clk_multiple_parents_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
- ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
if (ret)
return ret;
@@ -474,7 +483,7 @@ clk_multiple_parents_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
- ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
@@ -482,23 +491,13 @@ clk_multiple_parents_mux_test_init(struct kunit *test)
ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
- ret = clk_hw_register(NULL, &ctx->hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
-static void
-clk_multiple_parents_mux_test_exit(struct kunit *test)
-{
- struct clk_multiple_parent_ctx *ctx = test->priv;
-
- clk_hw_unregister(&ctx->hw);
- clk_hw_unregister(&ctx->parents_ctx[0].hw);
- clk_hw_unregister(&ctx->parents_ctx[1].hw);
-}
-
/*
* Test that for a clock with multiple parents, clk_get_parent()
* actually returns the current one.
@@ -554,18 +553,18 @@ clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent1, *parent2;
unsigned long rate;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
- parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+ parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
- parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
@@ -586,10 +585,6 @@ clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
-
- clk_put(parent2);
- clk_put(parent1);
- clk_put(clk);
}
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
@@ -610,7 +605,6 @@ static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
.name = "clk-multiple-parents-mux-test",
.init = clk_multiple_parents_mux_test_init,
- .exit = clk_multiple_parents_mux_test_exit,
.test_cases = clk_multiple_parents_mux_test_cases,
};
@@ -630,29 +624,20 @@ clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
- ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
- ret = clk_hw_register(NULL, &ctx->hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
-static void
-clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
-{
- struct clk_multiple_parent_ctx *ctx = test->priv;
-
- clk_hw_unregister(&ctx->hw);
- clk_hw_unregister(&ctx->parents_ctx[1].hw);
-}
-
/*
* Test that, for a mux whose current parent hasn't been registered yet and is
* thus orphan, clk_get_parent() will return NULL.
@@ -905,7 +890,7 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
@@ -914,7 +899,7 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st
clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
- parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
@@ -924,9 +909,6 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
-
- clk_put(parent);
- clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
@@ -954,7 +936,6 @@ static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[]
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
.name = "clk-orphan-transparent-multiple-parent-mux-test",
.init = clk_orphan_transparent_multiple_parent_mux_test_init,
- .exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
@@ -979,7 +960,7 @@ static int clk_single_parent_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
- ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
@@ -987,7 +968,7 @@ static int clk_single_parent_mux_test_init(struct kunit *test)
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
- ret = clk_hw_register(NULL, &ctx->hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
if (ret)
return ret;
@@ -1053,7 +1034,7 @@ clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent;
int ret;
@@ -1067,8 +1048,6 @@ clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
ret = clk_set_rate_range(clk, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
-
- clk_put(clk);
}
/*
@@ -1085,7 +1064,7 @@ clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent;
int ret;
@@ -1099,8 +1078,6 @@ clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
ret = clk_set_rate_range(parent, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
-
- clk_put(clk);
}
/*
@@ -1231,7 +1208,6 @@ static struct kunit_suite
clk_single_parent_mux_test_suite = {
.name = "clk-single-parent-mux-test",
.init = clk_single_parent_mux_test_init,
- .exit = clk_single_parent_mux_test_exit,
.test_cases = clk_single_parent_mux_test_cases,
};
@@ -2659,7 +2635,916 @@ static struct kunit_suite clk_mux_no_reparent_test_suite = {
.test_cases = clk_mux_no_reparent_test_cases,
};
+struct clk_register_clk_parent_data_test_case {
+ const char *desc;
+ struct clk_parent_data pdata;
+};
+
+static void
+clk_register_clk_parent_data_test_case_to_desc(
+ const struct clk_register_clk_parent_data_test_case *t, char *desc)
+{
+ strcpy(desc, t->desc);
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_of_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::index.
+ */
+ .desc = "clk_parent_data_of_index_test",
+ .pdata.index = 0,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::fwname.
+ */
+ .desc = "clk_parent_data_of_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::name.
+ */
+ .desc = "clk_parent_data_of_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct
+ * clk_parent_data::{fw_name,name}.
+ */
+ .desc = "clk_parent_data_of_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::{index,name}.
+ * Index takes priority.
+ */
+ .desc = "clk_parent_data_of_index_name_priority_test",
+ .pdata.index = 0,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct
+ * clk_parent_data::{index,fwname,name}. The fw_name takes
+ * priority over index and name.
+ */
+ .desc = "clk_parent_data_of_index_fwname_name_priority_test",
+ .pdata.index = 1,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/**
+ * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
+ * @np: device node of clk under test
+ * @hw: clk_hw for clk under test
+ */
+struct clk_register_clk_parent_data_of_ctx {
+ struct device_node *np;
+ struct clk_hw hw;
+};
+
+static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_of_ctx *ctx;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
+ if (!ctx->np)
+ return -ENODEV;
+
+ of_node_put_kunit(test, ctx->np);
+
+ return 0;
+}
+
+/*
+ * Test that a clk registered with a struct device_node can find a parent based on
+ * struct clk_parent_data when the hw member isn't set.
+ */
+static void clk_register_clk_parent_data_of_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
+ struct clk_hw *parent_hw;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_init_data init = { };
+ struct clk *expected_parent, *actual_parent;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
+
+ expected_parent = of_clk_get_kunit(test, ctx->np, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ test_param = test->param_value;
+ init.parent_data = &test_param->pdata;
+ init.num_parents = 1;
+ init.name = "parent_data_of_test_clk";
+ init.ops = &clk_dummy_single_parent_ops;
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
+
+ parent_hw = clk_hw_get_parent(&ctx->hw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+
+ actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
+ clk_register_clk_parent_data_of_test_gen_params),
+ {}
+};
+
+/*
+ * Test suite for registering clks with struct clk_parent_data and a struct
+ * device_node.
+ */
+static struct kunit_suite clk_register_clk_parent_data_of_suite = {
+ .name = "clk_register_clk_parent_data_of",
+ .init = clk_register_clk_parent_data_of_test_init,
+ .test_cases = clk_register_clk_parent_data_of_test_cases,
+};
+
+/**
+ * struct platform_driver_dev_ctx - Context to stash platform device
+ * @dev: device under test
+ * @pdrv: driver to attach to find @dev
+ */
+struct platform_driver_dev_ctx {
+ struct device *dev;
+ struct platform_driver pdrv;
+};
+
+static inline struct platform_driver_dev_ctx *
+pdev_to_platform_driver_dev_ctx(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct platform_driver_dev_ctx, pdrv);
+}
+
+static int kunit_platform_driver_dev_probe(struct platform_device *pdev)
+{
+ struct platform_driver_dev_ctx *ctx;
+
+ ctx = pdev_to_platform_driver_dev_ctx(pdev);
+ ctx->dev = &pdev->dev;
+
+ return 0;
+}
+
+static struct device *
+kunit_of_platform_driver_dev(struct kunit *test, const struct of_device_id *match_table)
+{
+ struct platform_driver_dev_ctx *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdrv.probe = kunit_platform_driver_dev_probe;
+ ctx->pdrv.driver.of_match_table = match_table;
+ ctx->pdrv.driver.name = __func__;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+
+ return ctx->dev;
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_device_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::index.
+ */
+ .desc = "clk_parent_data_device_index_test",
+ .pdata.index = 1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::fwname.
+ */
+ .desc = "clk_parent_data_device_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::name.
+ */
+ .desc = "clk_parent_data_device_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{fw_name,name}.
+ */
+ .desc = "clk_parent_data_device_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{index,name}. Index
+ * takes priority.
+ */
+ .desc = "clk_parent_data_device_index_name_priority_test",
+ .pdata.index = 1,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{index,fwname,name}.
+ * The fw_name takes priority over index and name.
+ */
+ .desc = "clk_parent_data_device_index_fwname_name_priority_test",
+ .pdata.index = 0,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
+ clk_register_clk_parent_data_device_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/*
+ * Test that a clk registered with a struct device can find a parent based on
+ * struct clk_parent_data when the hw member isn't set.
+ */
+static void clk_register_clk_parent_data_device_test(struct kunit *test)
+{
+ struct device *dev;
+ struct clk_hw *hw;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_hw *parent_hw;
+ struct clk_init_data init = { };
+ struct clk *expected_parent, *actual_parent;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
+
+ dev = kunit_of_platform_driver_dev(test, match_table);
+
+ expected_parent = clk_get_kunit(test, dev, "50");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ test_param = test->param_value;
+ init.parent_data = &test_param->pdata;
+ init.num_parents = 1;
+ init.name = "parent_data_device_test_clk";
+ init.ops = &clk_dummy_single_parent_ops;
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ parent_hw = clk_hw_get_parent(hw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+
+ actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_device_hw_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw.
+ */
+ .desc = "clk_parent_data_device_hw_index_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when
+ * struct clk_parent_data::fw_name is set.
+ */
+ .desc = "clk_parent_data_device_hw_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::name is set.
+ */
+ .desc = "clk_parent_data_device_hw_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::{fw_name,name} are set.
+ */
+ .desc = "clk_parent_data_device_hw_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::index is set. The hw pointer takes
+ * priority.
+ */
+ .desc = "clk_parent_data_device_hw_index_priority_test",
+ .pdata.index = 0,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when
+ * struct clk_parent_data::{index,fwname,name} are set.
+ * The hw pointer takes priority over everything else.
+ */
+ .desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
+ .pdata.index = 0,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
+ clk_register_clk_parent_data_device_hw_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw.
+ */
+static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
+{
+ struct device *dev;
+ struct clk_hw *hw;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_dummy_context *parent;
+ struct clk_hw *parent_hw;
+ struct clk_parent_data pdata = { };
+ struct clk_init_data init = { };
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
+
+ dev = kunit_of_platform_driver_dev(test, match_table);
+
+ parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_hw = &parent->hw;
+ parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
+ &clk_dummy_rate_ops, 0);
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, parent_hw));
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ test_param = test->param_value;
+ memcpy(&pdata, &test_param->pdata, sizeof(pdata));
+ pdata.hw = parent_hw;
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_dummy_single_parent_ops;
+ init.name = "parent_data_device_hw_test_clk";
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(hw));
+}
+
+static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
+ clk_register_clk_parent_data_device_test_gen_params),
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
+ clk_register_clk_parent_data_device_hw_test_gen_params),
+ {}
+};
+
+static int clk_register_clk_parent_data_device_init(struct kunit *test)
+{
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
+
+ return 0;
+}
+
+/*
+ * Test suite for registering clks with struct clk_parent_data and a struct
+ * device.
+ */
+static struct kunit_suite clk_register_clk_parent_data_device_suite = {
+ .name = "clk_register_clk_parent_data_device",
+ .init = clk_register_clk_parent_data_device_init,
+ .test_cases = clk_register_clk_parent_data_device_test_cases,
+};
+
+struct clk_assigned_rates_context {
+ struct clk_dummy_context clk0;
+ struct clk_dummy_context clk1;
+};
+
+/*
+ * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test
+ * @desc: Test description
+ * @overlay_begin: Pointer to start of DT overlay to apply for test
+ * @overlay_end: Pointer to end of DT overlay to apply for test
+ * @rate0: Initial rate of first clk
+ * @rate1: Initial rate of second clk
+ * @consumer_test: true if a consumer is being tested
+ */
+struct clk_assigned_rates_test_param {
+ const char *desc;
+ u8 *overlay_begin;
+ u8 *overlay_end;
+ unsigned long rate0;
+ unsigned long rate1;
+ bool consumer_test;
+};
+
+#define TEST_PARAM_OVERLAY(overlay_name) \
+ .overlay_begin = of_overlay_begin(overlay_name), \
+ .overlay_end = of_overlay_end(overlay_name)
+
+static void
+clk_assigned_rates_register_clk(struct kunit *test,
+ struct clk_dummy_context *ctx,
+ struct device_node *np, const char *name,
+ unsigned long rate)
+{
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_dummy_rate_ops;
+ ctx->hw.init = &init;
+ ctx->rate = rate;
+
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw));
+ KUNIT_ASSERT_EQ(test, ctx->rate, rate);
+}
+
+/*
+ * Does most of the work of the test:
+ *
+ * 1. Apply the overlay to test
+ * 2. Register the clk or clks to test
+ * 3. Register the clk provider
+ * 4. Apply clk defaults to the consumer device if this is a consumer test
+ *
+ * The tests will set different test_param values to test different scenarios
+ * and validate that in their test functions.
+ */
+static int clk_assigned_rates_test_init(struct kunit *test)
+{
+ struct device_node *np, *consumer;
+ struct clk_hw_onecell_data *data;
+ struct clk_assigned_rates_context *ctx;
+ u32 clk_cells;
+ const struct clk_assigned_rates_test_param *test_param;
+
+ test_param = test->param_value;
+
+ KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test,
+ test_param->overlay_begin,
+ test_param->overlay_end));
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL));
+ test->priv = ctx;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates"));
+ of_node_put_kunit(test, np);
+
+ KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells));
+ /* Only support #clock-cells = <0> or <1> */
+ KUNIT_ASSERT_LT(test, clk_cells, 2);
+
+ clk_assigned_rates_register_clk(test, &ctx->clk0, np,
+ "test_assigned_rate0", test_param->rate0);
+ if (clk_cells == 0) {
+ KUNIT_ASSERT_EQ(test, 0,
+ of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get,
+ &ctx->clk0.hw));
+ } else if (clk_cells == 1) {
+ clk_assigned_rates_register_clk(test, &ctx->clk1, np,
+ "test_assigned_rate1", test_param->rate1);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL));
+ data->num = 2;
+ data->hws[0] = &ctx->clk0.hw;
+ data->hws[1] = &ctx->clk1.hw;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data));
+ }
+
+ /* Consumers are optional */
+ if (test_param->consumer_test) {
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer"));
+ of_node_put_kunit(test, consumer);
+
+ KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false));
+ }
+
+ return 0;
+}
+
+static void clk_assigned_rates_assigns_one(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+}
+
+static void clk_assigned_rates_assigns_multiple(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+ KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE);
+}
+
+static void clk_assigned_rates_skips(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+ const struct clk_assigned_rates_test_param *test_param = test->param_value;
+
+ KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0);
+}
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer);
+
+/* Test cases that assign one rate */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = {
+ {
+ /*
+ * Test that a single cell assigned-clock-rates property
+ * assigns the rate when the property is in the provider.
+ */
+ .desc = "provider assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one),
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates property
+ * assigns the rate when the property is in the consumer.
+ */
+ .desc = "consumer assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer),
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+ * assigns the rate when the property is in the provider.
+ */
+ .desc = "provider assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one),
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+ * assigns the rate when the property is in the consumer.
+ */
+ .desc = "consumer assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer),
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one,
+ clk_assigned_rates_assigns_one_test_params, desc)
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer);
+
+/* Test cases that assign multiple rates */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = {
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates property
+ * assigns the rates when the property is in the provider.
+ */
+ .desc = "provider assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple),
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates property
+ * assigns the rates when the property is in the consumer.
+ */
+ .desc = "consumer assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer),
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+		 * Test that a multiple cell assigned-clock-rates-u64 property
+		 * assigns the rates when the property is in the provider.
+ .desc = "provider assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple),
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates-u64 property
+ * assigns the rates when the property is in the consumer.
+ */
+ .desc = "consumer assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer),
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple,
+ clk_assigned_rates_assigns_multiple_test_params,
+ desc)
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer);
+
+/* Test cases that skip changing the rate due to malformed DT */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = {
+ {
+ /*
+ * Test that an assigned-clock-rates property without an assigned-clocks
+ * property fails when the property is in the provider.
+ */
+ .desc = "provider missing assigned-clocks",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property without an assigned-clocks
+ * property fails when the property is in the consumer.
+ */
+ .desc = "consumer missing assigned-clocks",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property of zero doesn't
+ * set a rate when the property is in the provider.
+ */
+ .desc = "provider assigned-clock-rates of zero",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property of zero doesn't
+ * set a rate when the property is in the consumer.
+ */
+ .desc = "consumer assigned-clock-rates of zero",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that an assigned-clocks property with a null phandle
+ * doesn't set a rate when the property is in the provider.
+ */
+ .desc = "provider assigned-clocks null phandle",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clocks property with a null phandle
+ * doesn't set a rate when the property is in the consumer.
+ */
+		.desc = "consumer assigned-clocks null phandle",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips,
+ clk_assigned_rates_skips_test_params,
+ desc)
+
+static struct kunit_case clk_assigned_rates_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one,
+ clk_assigned_rates_assigns_one_gen_params),
+ KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple,
+ clk_assigned_rates_assigns_multiple_gen_params),
+ KUNIT_CASE_PARAM(clk_assigned_rates_skips,
+ clk_assigned_rates_skips_gen_params),
+ {}
+};
+
+/*
+ * Test suite for assigned-clock-rates{-u64} DT property.
+ */
+static struct kunit_suite clk_assigned_rates_suite = {
+ .name = "clk_assigned_rates",
+ .test_cases = clk_assigned_rates_test_cases,
+ .init = clk_assigned_rates_test_init,
+};
+
+static const struct clk_init_data clk_hw_get_dev_of_node_init_data = {
+ .name = "clk_hw_get_dev_of_node",
+ .ops = &empty_clk_ops,
+};
+
+/*
+ * Test that a clk registered with a struct device returns the device from
+ * clk_hw_get_dev() and the node from clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_get_dev_returns_dev(struct kunit *test)
+{
+ struct device *dev;
+ struct clk_hw *hw;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-hw-get-dev-of-node" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ dev = kunit_of_platform_driver_dev(test, match_table);
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, dev_of_node(dev), clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with a struct device that's not associated with
+ * an OF node returns the device from clk_hw_get_dev() and NULL from
+ * clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_no_node_get_dev_returns_dev(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_hw *hw;
+
+ pdev = kunit_platform_device_alloc(test, "clk_hw_register_dev_no_node", -1);
+ KUNIT_ASSERT_NOT_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ dev = &pdev->dev;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without a struct device returns NULL from
+ * clk_hw_get_dev()
+ */
+static void clk_hw_register_NULL_get_dev_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with an of_node returns the node from
+ * clk_hw_get_of_node() and NULL from clk_hw_get_dev()
+ */
+static void of_clk_hw_register_node_get_of_node_returns_node(struct kunit *test)
+{
+ struct device_node *np;
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ np = of_find_compatible_node(NULL, NULL, "test,clk-hw-get-dev-of-node");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+ of_node_put_kunit(test, np);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, np, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without an of_node returns NULL from both
+ * clk_hw_get_dev() and clk_hw_get_of_node()
+ */
+static void of_clk_hw_register_NULL_get_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+static struct kunit_case clk_hw_get_dev_of_node_test_cases[] = {
+ KUNIT_CASE(clk_hw_register_dev_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_dev_no_node_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_NULL_get_dev_of_node_returns_NULL),
+ KUNIT_CASE(of_clk_hw_register_node_get_of_node_returns_node),
+ KUNIT_CASE(of_clk_hw_register_NULL_get_of_node_returns_NULL),
+ {}
+};
+
+/*
+ * Test suite to verify clk_hw_get_dev() and clk_hw_get_of_node() when clk
+ * registered with clk_hw_register() and of_clk_hw_register()
+ */
+static struct kunit_suite clk_hw_get_dev_of_node_test_suite = {
+ .name = "clk_hw_get_dev_of_node_test_suite",
+ .test_cases = clk_hw_get_dev_of_node_test_cases,
+};
+
+
kunit_test_suites(
+ &clk_assigned_rates_suite,
+ &clk_hw_get_dev_of_node_test_suite,
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
@@ -2671,7 +3556,10 @@ kunit_test_suites(
&clk_range_test_suite,
&clk_range_maximize_test_suite,
&clk_range_minimize_test_suite,
+ &clk_register_clk_parent_data_of_suite,
+ &clk_register_clk_parent_data_device_suite,
&clk_single_parent_mux_test_suite,
- &clk_uncached_test_suite
+ &clk_uncached_test_suite,
);
+MODULE_DESCRIPTION("Kunit tests for clk framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 6a77d7e201a9..e0bede6350e1 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -153,7 +153,7 @@ struct clk_lookup_alloc {
char con_id[MAX_CON_ID];
};
-static struct clk_lookup * __ref
+static __printf(3, 0) struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -204,11 +204,18 @@ fail:
pr_err("%pV:%s: %s ID is greater than %zu\n",
&vaf, con_id, failure, max_size);
va_end(ap_copy);
- kfree(cla);
- return NULL;
+
+ /*
+ * Don't fail in this case, but as the entry won't ever match, just
+ * fill it with something that also won't match.
+ */
+ strscpy(cla->con_id, "bad", sizeof(cla->con_id));
+ strscpy(cla->dev_id, "bad", sizeof(cla->dev_id));
+
+ return &cla->cl;
}
-static struct clk_lookup *
+static __printf(3, 0) struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -296,9 +303,8 @@ void clkdev_drop(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_drop);
-static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
- const char *con_id,
- const char *dev_id, ...)
+static __printf(3, 4) struct clk_lookup *
+__clk_register_clkdev(struct clk_hw *hw, const char *con_id, const char *dev_id, ...)
{
struct clk_lookup *cl;
va_list ap;
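The __printf(3, 0) annotations added above let the compiler type-check format strings that are forwarded into these va_list consumers; the second index is 0 because the variadic arguments themselves are not visible at this level. A minimal userspace sketch of the same attribute (the function names here are hypothetical, not from the patch):

	#include <stdarg.h>
	#include <stdio.h>

	/*
	 * __printf(fmt_idx, first_arg_idx) expands to GCC's format
	 * attribute; a first-arg index of 0 marks a va_list sink, so the
	 * caller's format string is still checked against its own args.
	 */
	#define __printf(a, b) __attribute__((format(printf, a, b)))

	static __printf(1, 0) void vlog(const char *fmt, va_list ap)
	{
		vfprintf(stderr, fmt, ap);
	}

	static __printf(1, 2) void log_msg(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vlog(fmt, ap);	/* checked: fmt flows into a (1, 0) sink */
		va_end(ap);
	}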
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index 5d0ae1ee72ec..f9d5c9a392e4 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -4,10 +4,8 @@ ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
obj-y += pll.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
obj-y += psc.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
endif
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index ad2d0df43dc6..a5109fe8b16e 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -508,13 +508,12 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
struct clk *fck_clk;
struct da8xx_usb0_clk48 *usb0;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
fck_clk = devm_clk_get(dev, "fck");
if (IS_ERR(fck_clk)) {
- dev_err_probe(dev, PTR_ERR(fck_clk), "Missing fck clock\n");
- return ERR_CAST(fck_clk);
+ return dev_err_cast_probe(dev, fck_clk, "Missing fck clock\n");
}
usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL);
@@ -583,7 +582,7 @@ da8xx_cfgchip_register_usb1_clk48(struct device *dev,
{
const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
struct da8xx_usb1_clk48 *usb1;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
@@ -749,11 +748,9 @@ static int da8xx_cfgchip_probe(struct platform_device *pdev)
clk_init = device_get_match_data(dev);
if (clk_init) {
- struct device_node *parent;
+ struct device_node *parent __free(device_node) = of_get_parent(dev->of_node);
- parent = of_get_parent(dev->of_node);
regmap = syscon_node_to_regmap(parent);
- of_node_put(parent);
} else if (pdev->id_entry && pdata) {
clk_init = (void *)pdev->id_entry->driver_data;
regmap = pdata->cfgchip;
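The __free(device_node) change above relies on the kernel's scope-based cleanup helpers from linux/cleanup.h: when parent goes out of scope, of_node_put() runs automatically on every exit path, which is why the explicit put can be dropped. A simplified model of the mechanism, not the kernel's actual macros:

	/*
	 * Simplified model; the real kernel builds this from
	 * DEFINE_FREE()/__free() in linux/cleanup.h.
	 */
	#define __free(fn)	__attribute__((cleanup(cleanup_##fn)))

	struct device_node;
	void of_node_put(struct device_node *np);

	static inline void cleanup_device_node(struct device_node **np)
	{
		if (*np)
			of_node_put(*np);
	}

	/*
	 * Usage mirrors the hunk above:
	 *
	 *	struct device_node *parent __free(device_node) =
	 *		of_get_parent(np);
	 *	regmap = syscon_node_to_regmap(parent);
	 *	// of_node_put(parent) runs automatically here
	 */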
diff --git a/drivers/clk/davinci/pll-da830.c b/drivers/clk/davinci/pll-da830.c
deleted file mode 100644
index 0a0d06fb25fd..000000000000
--- a/drivers/clk/davinci/pll-da830.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DA830/OMAP-L137/AM17XX
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clkdev.h>
-#include <linux/clk/davinci.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info da830_pll_info = {
- .name = "pll0",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 4,
- .pllm_max = 32,
- .pllout_min_rate = 300000000,
- .pllout_max_rate = 600000000,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
-};
-
-/*
- * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
- * meaning that we could change the divider as long as we keep the correct
- * ratio between all of the clocks, but we don't support that because there is
- * currently not a need for it.
- */
-
-SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
-SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
-SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
-
-int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
- clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc0");
- clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
- clk_register_clkdev(clk, "pll0_sysclk3", "da830-psc0");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
- clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc0");
- clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
- clk_register_clkdev(clk, "pll0_sysclk5", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
- clk_register_clkdev(clk, "pll0_sysclk6", "da830-psc0");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
-
- clk = davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
- clk_register_clkdev(clk, NULL, "i2c_davinci.1");
- clk_register_clkdev(clk, "timer0", NULL);
- clk_register_clkdev(clk, NULL, "davinci-wdt");
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 5bbbb3a66477..bfb6bbdc036c 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/platform_data/clk-davinci-pll.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -764,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return PTR_ERR(clk);
}
- child = of_get_child_by_name(node, "pllout");
- if (of_device_is_available(child))
+ child = of_get_available_child_by_name(node, "pllout");
+ if (child) {
of_clk_add_provider(child, of_clk_src_simple_get, clk);
- of_node_put(child);
+ of_node_put(child);
+ }
- child = of_get_child_by_name(node, "sysclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "sysclk");
+ if (child) {
struct clk_onecell_data *clk_data;
struct clk **clks;
int n_clks = max_sysclk_id + 1;
@@ -804,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
clks[(*div_info)->id] = clk;
}
of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "auxclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "auxclk");
+ if (child) {
char child_name[MAX_NAME_SIZE];
snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
@@ -819,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
child_name, PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "obsclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "obsclk");
+ if (child) {
if (obsclk_info)
clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
else
@@ -834,53 +835,23 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
}
- of_node_put(child);
return 0;
}
-static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
-{
- struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
-
- /*
- * Platform data is optional, so allocate a new struct if one was not
- * provided. For device tree, this will always be the case.
- */
- if (!pdata)
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- /* for device tree, we need to fill in the struct */
- if (dev->of_node)
- pdata->cfgchip =
- syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
-
- return pdata;
-}
-
/* needed in early boot for clocksource/clockevent */
-#ifdef CONFIG_ARCH_DAVINCI_DA850
CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
-#endif
static const struct of_device_id davinci_pll_of_match[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-pll1", .data = of_da850_pll1_init },
-#endif
{ }
};
static const struct platform_device_id davinci_pll_id_table[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA830
- { .name = "da830-pll", .driver_data = (kernel_ulong_t)da830_pll_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
{ .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
-#endif
{ }
};
@@ -890,8 +861,8 @@ typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base,
static int davinci_pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct davinci_pll_platform_data *pdata;
davinci_pll_init pll_init = NULL;
+ struct regmap *cfgchip;
void __iomem *base;
pll_init = device_get_match_data(dev);
@@ -903,17 +874,13 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- pdata = davinci_pll_get_pdata(dev);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- return pll_init(dev, base, pdata->cfgchip);
+ return pll_init(dev, base, cfgchip);
}
static struct platform_driver davinci_pll_driver = {
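Note what the of_get_available_child_by_name() conversion above buys beyond brevity: the helper returns NULL, with no reference taken, when the child is absent or disabled, so the of_node_put() can move inside the success branch and the old pattern of putting a possibly-NULL node disappears. The resulting shape, distilled from the pllout hunk:

	child = of_get_available_child_by_name(node, "pllout");
	if (child) {
		of_clk_add_provider(child, of_clk_src_simple_get, clk);
		of_node_put(child);	/* a ref only exists on this path */
	}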
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 20bfcec2d3b5..ad286ba4ce0c 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -80,7 +80,7 @@ static const struct davinci_pll_sysclk_info n = { \
* @name: The name of the clock
* @parent_names: Array of names of the parent clocks
* @num_parents: Length of @parent_names
- * @table: Array of values to write to OCSEL[OCSRC] cooresponding to
+ * @table: Array of values to write to OCSEL[OCSRC] corresponding to
* @parent_names
* @ocsrc_mask: Bitmask for OCSEL[OCSRC]
*/
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
deleted file mode 100644
index 6481337382a6..000000000000
--- a/drivers/clk/davinci/psc-da830.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DA830/OMAP-L137/AM17XX
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(aemif_clkdev, NULL, "ti-aemif");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-
-static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
- LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(3, 0, aemif, pll0_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
- LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
- LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(8, 0, secu_mgr, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
- LPSC(10, 0, scr0_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(11, 0, scr1_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(12, 0, scr2_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(13, 0, pruss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(14, 0, arm, pll0_sysclk6, NULL, LPSC_ALWAYS_ENABLED),
- { }
-};
-
-static int da830_psc0_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, da830_psc0_info, 16, base);
-}
-
-static struct clk_bulk_data da830_psc0_parent_clks[] = {
- { .id = "pll0_sysclk2" },
- { .id = "pll0_sysclk3" },
- { .id = "pll0_sysclk4" },
- { .id = "pll0_sysclk6" },
-};
-
-const struct davinci_psc_init_data da830_psc0_init_data = {
- .parent_clks = da830_psc0_parent_clks,
- .num_parent_clks = ARRAY_SIZE(da830_psc0_parent_clks),
- .psc_init = &da830_psc0_init,
-};
-
-LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks",
- NULL, "musb-da8xx",
- NULL, "cppi41-dmaengine");
-LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
-LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
-LPSC_CLKDEV1(mcasp2_clkdev, NULL, "davinci-mcasp.2");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
-LPSC_CLKDEV2(pwm_clkdev, "fck", "ehrpwm.0",
- "fck", "ehrpwm.1");
-LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
- "fck", "ecap.1",
- "fck", "ecap.2");
-LPSC_CLKDEV2(eqep_clkdev, NULL, "eqep.0",
- NULL, "eqep.1");
-
-static const struct davinci_lpsc_clk_info da830_psc1_info[] = {
- LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
- LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
- LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
- LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
- LPSC(6, 0, emif3, pll0_sysclk5, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, mcasp0, pll0_sysclk2, mcasp0_clkdev, 0),
- LPSC(8, 0, mcasp1, pll0_sysclk2, mcasp1_clkdev, 0),
- LPSC(9, 0, mcasp2, pll0_sysclk2, mcasp2_clkdev, 0),
- LPSC(10, 0, spi1, pll0_sysclk2, spi1_clkdev, 0),
- LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
- LPSC(12, 0, uart1, pll0_sysclk2, uart1_clkdev, 0),
- LPSC(13, 0, uart2, pll0_sysclk2, uart2_clkdev, 0),
- LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
- LPSC(17, 0, pwm, pll0_sysclk2, pwm_clkdev, 0),
- LPSC(20, 0, ecap, pll0_sysclk2, ecap_clkdev, 0),
- LPSC(21, 0, eqep, pll0_sysclk2, eqep_clkdev, 0),
- { }
-};
-
-static int da830_psc1_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, da830_psc1_info, 32, base);
-}
-
-static struct clk_bulk_data da830_psc1_parent_clks[] = {
- { .id = "pll0_sysclk2" },
- { .id = "pll0_sysclk4" },
- { .id = "pll0_sysclk5" },
-};
-
-const struct davinci_psc_init_data da830_psc1_init_data = {
- .parent_clks = da830_psc1_parent_clks,
- .num_parent_clks = ARRAY_SIZE(da830_psc1_parent_clks),
- .psc_init = &da830_psc1_init,
-};
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
index 5a18bca464cd..94081ab1e688 100644
--- a/drivers/clk/davinci/psc-da850.c
+++ b/drivers/clk/davinci/psc-da850.c
@@ -6,7 +6,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/reset-controller.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -66,14 +65,8 @@ LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
"fck", "ecap.1",
"fck", "ecap.2");
-static struct reset_control_lookup da850_psc0_reset_lookup_table[] = {
- RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL),
-};
-
static int da850_psc0_init(struct device *dev, void __iomem *base)
{
- reset_controller_add_lookup(da850_psc0_reset_lookup_table,
- ARRAY_SIZE(da850_psc0_reset_lookup_table));
return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base);
}
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 355d1be0b5d8..f3ee9397bb0c 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -277,6 +277,11 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
best_dev_name(dev), name);
+ if (!lpsc->pm_domain.name) {
+ clk_hw_unregister(&lpsc->hw);
+ kfree(lpsc);
+ return ERR_PTR(-ENOMEM);
+ }
lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
@@ -494,22 +499,14 @@ int of_davinci_psc_clk_init(struct device *dev,
}
static const struct of_device_id davinci_psc_of_match[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-psc0", .data = &of_da850_psc0_init_data },
{ .compatible = "ti,da850-psc1", .data = &of_da850_psc1_init_data },
-#endif
{ }
};
static const struct platform_device_id davinci_psc_id_table[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA830
- { .name = "da830-psc0", .driver_data = (kernel_ulong_t)&da830_psc0_init_data },
- { .name = "da830-psc1", .driver_data = (kernel_ulong_t)&da830_psc1_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
{ .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index bd23f6fd56df..742672843776 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -94,14 +94,9 @@ struct davinci_psc_init_data {
int (*psc_init)(struct device *dev, void __iomem *base);
};
-#ifdef CONFIG_ARCH_DAVINCI_DA830
-extern const struct davinci_psc_init_data da830_psc0_init_data;
-extern const struct davinci_psc_init_data da830_psc1_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
extern const struct davinci_psc_init_data da850_psc0_init_data;
extern const struct davinci_psc_init_data da850_psc1_init_data;
extern const struct davinci_psc_init_data of_da850_psc0_init_data;
extern const struct davinci_psc_init_data of_da850_psc1_init_data;
-#endif
+
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index 141b727ff60d..0c50acd8543a 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -179,7 +179,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table);
static struct platform_driver hi3519_clk_driver = {
.probe = hi3519_clk_probe,
- .remove_new = hi3519_clk_remove,
+ .remove = hi3519_clk_remove,
.driver = {
.name = "hi3519-clk",
.of_match_table = hi3519_clk_match_table,
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index c79a94f6d9d2..f297fb25c512 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -407,7 +407,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hi3559av100_clk_pll *clk = to_pll_clk(hw);
- u64 frac_val, fbdiv_val, refdiv_val;
+ u64 frac_val, fbdiv_val;
u32 postdiv1_val, postdiv2_val;
u32 val;
u64 tmp, rate;
@@ -435,14 +435,13 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
val = readl_relaxed(clk->ctrl_reg2);
val = val >> clk->refdiv_shift;
val &= ((1 << clk->refdiv_width) - 1);
- refdiv_val = val;
/* rate = 24000000 * (fbdiv + frac / (1<<24) ) / refdiv */
rate = 0;
tmp = 24000000 * fbdiv_val + (24000000 * frac_val) / (1 << 24);
rate += tmp;
- do_div(rate, refdiv_val);
- do_div(rate, postdiv1_val * postdiv2_val);
+ rate = div_u64(rate, val);
+ rate = div_u64(rate, postdiv1_val * postdiv2_val);
return rate;
}
@@ -818,7 +817,7 @@ static void hi3559av100_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3559av100_crg_driver = {
.probe = hi3559av100_crg_probe,
- .remove_new = hi3559av100_crg_remove,
+ .remove = hi3559av100_crg_remove,
.driver = {
.name = "hi3559av100-clock",
.of_match_table = hi3559av100_crg_match_table,
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 3a653d54bee0..7c8b00ee6019 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -34,7 +34,7 @@
.num_parents = 0, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
- },
+ }
#define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw)
@@ -67,14 +67,14 @@ static unsigned long hi3660_stub_clk_recalc_rate(struct clk_hw *hw,
return stub_clk->rate;
}
-static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi3660_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* LPM3 handles rate rounding so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -97,15 +97,15 @@ static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi3660_stub_clk_ops = {
.recalc_rate = hi3660_stub_clk_recalc_rate,
- .round_rate = hi3660_stub_clk_round_rate,
+ .determine_rate = hi3660_stub_clk_determine_rate,
.set_rate = hi3660_stub_clk_set_rate,
};
static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = {
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc")
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc"),
};
static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index a8319795ed1c..bf99cfafafa0 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -161,11 +161,11 @@ static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hi6220_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
- unsigned long new_rate = rate / 1000; /* kHz */
+ unsigned long new_rate = req->rate / 1000; /* kHz */
switch (stub_clk->id) {
case HI6220_STUB_ACPU0:
@@ -181,12 +181,14 @@ static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
break;
}
- return new_rate;
+ req->rate = new_rate;
+
+ return 0;
}
static const struct clk_ops hi6220_stub_clk_ops = {
.recalc_rate = hi6220_stub_clk_recalc_rate,
- .round_rate = hi6220_stub_clk_round_rate,
+ .determine_rate = hi6220_stub_clk_determine_rate,
.set_rate = hi6220_stub_clk_set_rate,
};
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
index 5348bafe694f..6bae18a84cb6 100644
--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
+++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
@@ -55,13 +55,15 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi6220_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, dclk->table,
+ dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -93,7 +95,7 @@ static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi6220_clkdiv_ops = {
.recalc_rate = hi6220_clkdiv_recalc_rate,
- .round_rate = hi6220_clkdiv_round_rate,
+ .determine_rate = hi6220_clkdiv_determine_rate,
.set_rate = hi6220_clkdiv_set_rate,
};
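The hi3660, hi6220 and several later imx hunks in this patch are the same mechanical migration: the legacy .round_rate op (return the chosen rate as a long, report the parent rate through a pointer) becomes .determine_rate (fill in a struct clk_rate_request, return 0 or a negative errno). A sketch of the transformation, using a hypothetical helper foo_pick_rate():

	/* Before: */
	static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
	{
		return foo_pick_rate(rate, *prate);
	}

	/*
	 * After: the request carries rate and best_parent_rate in one
	 * struct, and the return value is now an error code.
	 */
	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		req->rate = foo_pick_rate(req->rate, req->best_parent_rate);

		return 0;
	}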
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index 90d858522967..21d4297f3225 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -17,9 +17,9 @@
#include "clk.h"
/* clock separated gate register offset */
-#define CLKGATE_SEPERATED_ENABLE 0x0
-#define CLKGATE_SEPERATED_DISABLE 0x4
-#define CLKGATE_SEPERATED_STATUS 0x8
+#define CLKGATE_SEPARATED_ENABLE 0x0
+#define CLKGATE_SEPARATED_DISABLE 0x4
+#define CLKGATE_SEPARATED_STATUS 0x8
struct clkgate_separated {
struct clk_hw hw;
@@ -40,7 +40,7 @@ static int clkgate_separated_enable(struct clk_hw *hw)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
writel_relaxed(reg, sclk->enable);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
return 0;
@@ -56,8 +56,8 @@ static void clkgate_separated_disable(struct clk_hw *hw)
if (sclk->lock)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
- writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ writel_relaxed(reg, sclk->enable + CLKGATE_SEPARATED_DISABLE);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
}
@@ -68,7 +68,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
u32 reg;
sclk = container_of(hw, struct clkgate_separated, hw);
- reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ reg = readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
reg &= BIT(sclk->bit_idx);
return reg ? 1 : 0;
@@ -100,7 +100,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- sclk->enable = reg + CLKGATE_SEPERATED_ENABLE;
+ sclk->enable = reg + CLKGATE_SEPARATED_ENABLE;
sclk->bit_idx = bit_idx;
sclk->flags = clk_gate_flags;
sclk->hw.init = &init;
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
index e602e65fbc38..b66140f74c51 100644
--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -294,7 +294,7 @@ static void hi3516cv300_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3516cv300_crg_driver = {
.probe = hi3516cv300_crg_probe,
- .remove_new = hi3516cv300_crg_remove,
+ .remove = hi3516cv300_crg_remove,
.driver = {
.name = "hi3516cv300-crg",
.of_match_table = hi3516cv300_crg_match_table,
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index f651b197e45a..8eabd1cc229f 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -377,7 +377,7 @@ static void hi3798cv200_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3798cv200_crg_driver = {
.probe = hi3798cv200_crg_probe,
- .remove_new = hi3798cv200_crg_remove,
+ .remove = hi3798cv200_crg_remove,
.driver = {
.name = "hi3798cv200-crg",
.of_match_table = hi3798cv200_crg_match_table,
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index b00cbd045af5..db96f8bea630 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -67,21 +67,21 @@ static void __init clk_boston_setup(struct device_node *np)
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register input clock: %pe\n", hw);
goto fail_input;
}
onecell->hws[BOSTON_CLK_INPUT] = hw;
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register sys clock: %pe\n", hw);
goto fail_sys;
}
onecell->hws[BOSTON_CLK_SYS] = hw;
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register cpu clock: %pe\n", hw);
goto fail_cpu;
}
onecell->hws[BOSTON_CLK_CPU] = hw;
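The %pe conversions above use the printk extension that decodes an ERR_PTR into its symbolic errno name, so the PTR_ERR()/%ld pairing is no longer needed:

	hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
	if (IS_ERR(hw)) {
		/* logs e.g. "failed to register input clock: -ENOMEM" */
		pr_err("failed to register input clock: %pe\n", hw);
	}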
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 6da0fba68225..b292e7ca5c24 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -81,6 +81,7 @@ config CLK_IMX8MP
tristate "IMX8MP CCM Clock Driver"
depends on ARCH_MXC || COMPILE_TEST
select MXC_CLK
+ select AUXILIARY_BUS if RESET_CONTROLLER
help
Build the driver for i.MX8MP CCM Clock Driver
@@ -104,6 +105,7 @@ config CLK_IMX8ULP
tristate "IMX8ULP CCM Clock Driver"
depends on ARCH_MXC || COMPILE_TEST
select MXC_CLK
+ select AUXILIARY_BUS
help
Build the driver for i.MX8ULP CCM Clock Driver
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 03f2b2a1ab63..208b46873a18 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -41,6 +41,7 @@ clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
clk-imx-acm-$(CONFIG_CLK_IMX8QXP) = clk-imx8-acm.o
obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp.o
+obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp-sim-lpav.o
obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index f163df952ccc..eb27c6fee359 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -46,12 +46,12 @@ static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}
-static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_busy_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_busy_divider *busy = to_clk_busy_divider(hw);
- return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+ return busy->div_ops->determine_rate(&busy->div.hw, req);
}
static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -69,7 +69,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_busy_divider_ops = {
.recalc_rate = clk_busy_divider_recalc_rate,
- .round_rate = clk_busy_divider_round_rate,
+ .determine_rate = clk_busy_divider_determine_rate,
.set_rate = clk_busy_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index e208ddc51133..37d2fc197be6 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -14,6 +15,7 @@
#include "../clk-fractional-divider.h"
#include "clk.h"
+#define PCG_PR_MASK BIT(31)
#define PCG_PCS_SHIFT 24
#define PCG_PCS_MASK 0x7
#define PCG_CGC_SHIFT 30
@@ -35,6 +37,9 @@ static int pcc_gate_enable(struct clk_hw *hw)
if (ret)
return ret;
+ /* Make sure the IP's clock is ready before releasing the reset */
+ udelay(1);
+
spin_lock_irqsave(gate->lock, flags);
/*
* release the sw reset for peripherals associated with
@@ -46,6 +51,15 @@ static int pcc_gate_enable(struct clk_hw *hw)
spin_unlock_irqrestore(gate->lock, flags);
+ /*
+ * Read back the register to make sure the previous write has
+ * reached the target HW register. For an IP like the GPU, after
+ * deasserting the reset we need to wait a while to make sure the
+ * sync reset is done.
+ */
+ readl(gate->reg);
+ udelay(1);
+
return 0;
}
@@ -78,6 +92,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
struct clk_hw *hw;
u32 val;
+ val = readl(reg);
+ if (!(val & PCG_PR_MASK)) {
+ pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
+ return NULL;
+ }
+
if (mux_present) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
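This file (and clk-fracn-gppll.c later in the patch) adds the classic posted-write flush: MMIO writes issued with writel_relaxed() may still sit in a write buffer, so a readl() of the same register forces them out to the device before a delay or status poll starts. The pattern, with an illustrative register name:

	writel_relaxed(val, base + CTRL_REG);	/* CTRL_REG is hypothetical */
	readl(base + CTRL_REG);			/* flush the posted write */
	udelay(1);		/* the delay now starts after the write landed */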
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 8cc07d056a83..1467d0a1b934 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -73,21 +73,6 @@ static int imx8m_clk_composite_compute_dividers(unsigned long rate,
return ret;
}
-static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
-{
- int prediv_value;
- int div_value;
-
- imx8m_clk_composite_compute_dividers(rate, *prate,
- &prediv_value, &div_value);
- rate = DIV_ROUND_UP(*prate, prediv_value);
-
- return DIV_ROUND_UP(rate, div_value);
-
-}
-
static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
@@ -153,7 +138,6 @@ static int imx8m_divider_determine_rate(struct clk_hw *hw,
static const struct clk_ops imx8m_clk_composite_divider_ops = {
.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
- .round_rate = imx8m_clk_composite_divider_round_rate,
.set_rate = imx8m_clk_composite_divider_set_rate,
.determine_rate = imx8m_divider_determine_rate,
};
@@ -204,6 +188,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
.determine_rate = imx8m_clk_composite_mux_determine_rate,
};
+static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ val = readl(gate->reg);
+ val |= BIT(gate->bit_idx);
+ writel(val, gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
+{
+ /* composite clk requires the disable hook */
+}
+
+static const struct clk_ops imx8m_clk_composite_gate_ops = {
+ .enable = imx8m_clk_composite_gate_enable,
+ .disable = imx8m_clk_composite_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
@@ -217,6 +229,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
struct clk_mux *mux;
const struct clk_ops *divider_ops;
const struct clk_ops *mux_ops;
+ const struct clk_ops *gate_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -257,20 +270,22 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
/* skip registering the gate ops if M4 is enabled */
- if (!mcore_booted) {
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
- goto free_div;
-
- gate_hw = &gate->hw;
- gate->reg = reg;
- gate->bit_idx = PCG_CGC_SHIFT;
- gate->lock = &imx_ccm_lock;
- }
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto free_div;
+
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
+ gate->lock = &imx_ccm_lock;
+ if (!mcore_booted)
+ gate_ops = &clk_gate_ops;
+ else
+ gate_ops = &imx8m_clk_composite_gate_ops;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, mux_ops, div_hw,
- divider_ops, gate_hw, &clk_gate_ops, flags);
+ divider_ops, gate_hw, gate_ops, flags);
if (IS_ERR(hw))
goto free_gate;
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 81164bdcd6cc..513d74a39d3b 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
+ /*
+ * Skip disabling the root clock gate if the mcore is enabled,
+ * as the root clock may still be used by the mcore.
+ */
+ if (mcore_booted)
+ return;
+
imx93_clk_composite_gate_endisable(hw, 0);
}
@@ -91,12 +98,6 @@ imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long
-imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
-{
- return clk_divider_ops.round_rate(hw, rate, prate);
-}
-
static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
@@ -134,7 +135,6 @@ static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long
static const struct clk_ops imx93_clk_composite_divider_ops = {
.recalc_rate = imx93_clk_composite_divider_recalc_rate,
- .round_rate = imx93_clk_composite_divider_round_rate,
.determine_rate = imx93_clk_composite_divider_determine_rate,
.set_rate = imx93_clk_composite_divider_set_rate,
};
@@ -222,7 +222,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ro_ops, div_hw,
&clk_divider_ro_ops, NULL, NULL, flags);
- } else if (!mcore_booted) {
+ } else {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
goto fail;
@@ -238,12 +238,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
&imx93_clk_composite_divider_ops, gate_hw,
&imx93_clk_composite_gate_ops,
flags | CLK_SET_RATE_NO_REPARENT);
- } else {
- hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &imx93_clk_composite_mux_ops, div_hw,
- &imx93_clk_composite_divider_ops, NULL,
- &imx93_clk_composite_gate_ops,
- flags | CLK_SET_RATE_NO_REPARENT);
}
if (IS_ERR(hw))
diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c
index cb6ca4cf0535..43637cb61693 100644
--- a/drivers/clk/imx/clk-cpu.c
+++ b/drivers/clk/imx/clk-cpu.c
@@ -30,12 +30,14 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
return clk_get_rate(cpu->div);
}
-static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_cpu *cpu = to_clk_cpu(hw);
- return clk_round_rate(cpu->pll, rate);
+ req->rate = clk_round_rate(cpu->pll, req->rate);
+
+ return 0;
}
static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +68,7 @@ static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 100ca828b052..aa6addbeb5a8 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -18,7 +18,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup divider clock is a subclass of basic clk_divider
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_div {
struct clk_divider divider;
@@ -41,12 +41,12 @@ static unsigned long clk_fixup_div_recalc_rate(struct clk_hw *hw,
return fixup_div->ops->recalc_rate(&fixup_div->divider.hw, parent_rate);
}
-static long clk_fixup_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fixup_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
- return fixup_div->ops->round_rate(&fixup_div->divider.hw, rate, prate);
+ return fixup_div->ops->determine_rate(&fixup_div->divider.hw, req);
}
static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -81,7 +81,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_fixup_div_ops = {
.recalc_rate = clk_fixup_div_recalc_rate,
- .round_rate = clk_fixup_div_round_rate,
+ .determine_rate = clk_fixup_div_determine_rate,
.set_rate = clk_fixup_div_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index b48701864ef0..418ac9fe2c26 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -17,7 +17,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup multiplexer clock is a subclass of basic clk_mux
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_mux {
struct clk_mux mux;
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index c703056fae85..eb668faaa38f 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -119,19 +119,19 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 parent_rate = *prate;
+ u64 parent_rate = req->best_parent_rate;
u32 divff, divfi;
u64 temp64;
parent_rate *= 8;
- rate *= 2;
- temp64 = rate;
+ req->rate *= 2;
+ temp64 = req->rate;
do_div(temp64, parent_rate);
divfi = temp64;
- temp64 = rate - divfi * parent_rate;
+ temp64 = req->rate - divfi * parent_rate;
temp64 *= PLL_FRAC_DENOM;
do_div(temp64, parent_rate);
divff = temp64;
@@ -140,9 +140,11 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= divff;
do_div(temp64, PLL_FRAC_DENOM);
- rate = parent_rate * divfi + temp64;
+ req->rate = parent_rate * divfi + temp64;
+
+ req->rate = req->rate / 2;
- return rate / 2;
+ return 0;
}
/*
@@ -198,7 +200,7 @@ static const struct clk_ops clk_frac_pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 44462ab50e51..090d60867250 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -78,6 +78,7 @@ struct clk_fracn_gppll {
* The Fvco should be in range 2.5Ghz to 5Ghz
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
+ PLL_FRACN_GP(1039500000U, 173, 25, 100, 1, 4),
PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
@@ -106,6 +107,7 @@ static const struct imx_fracn_gppll_rate_table int_tbl[] = {
PLL_FRACN_GP_INTEGER(1700000000U, 141, 1, 2),
PLL_FRACN_GP_INTEGER(1400000000U, 175, 1, 3),
PLL_FRACN_GP_INTEGER(900000000U, 150, 1, 4),
+ PLL_FRACN_GP_INTEGER(800000000U, 200, 1, 6),
};
struct imx_fracn_gppll_clk imx_fracn_gppll_integer = {
@@ -132,8 +134,8 @@ imx_get_pll_settings(struct clk_fracn_gppll *pll, unsigned long rate)
return NULL;
}
-static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fracn_gppll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
@@ -141,11 +143,16 @@ static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
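The lookup above depends on the stated invariant that the rate table is sorted in descending order: the first entry not above the request is the closest supported rate at or below it, and falling off the end selects the table minimum. The control flow, reduced to its essentials:

	/* Table sorted descending; pick first rate <= request, else minimum. */
	for (i = 0; i < pll->rate_count; i++) {
		if (req->rate >= rate_table[i].rate) {
			req->rate = rate_table[i].rate;
			return 0;
		}
	}
	req->rate = rate_table[pll->rate_count - 1].rate;
	return 0;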
@@ -252,9 +259,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv |
FIELD_PREP(PLL_MFI_MASK, rate->mfi);
writel_relaxed(pll_div, pll->base + PLL_DIV);
+ readl(pll->base + PLL_DIV);
if (pll->flags & CLK_FRACN_GPPLL_FRACN) {
writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
+ readl(pll->base + PLL_NUMERATOR);
}
/* Wait for 5us according to fracn mode pll doc */
@@ -263,6 +272,7 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
/* Enable Powerup */
tmp |= POWERUP_MASK;
writel_relaxed(tmp, pll->base + PLL_CTRL);
+ readl(pll->base + PLL_CTRL);
/* Wait Lock */
ret = clk_fracn_gppll_wait_lock(pll);
@@ -291,19 +301,24 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
if (val & POWERUP_MASK)
return 0;
+ if (pll->flags & CLK_FRACN_GPPLL_FRACN)
+ writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR),
+ pll->base + PLL_NUMERATOR);
+
val |= CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL);
val |= POWERUP_MASK;
writel_relaxed(val, pll->base + PLL_CTRL);
-
- val |= CLKMUX_EN;
- writel_relaxed(val, pll->base + PLL_CTRL);
+ readl(pll->base + PLL_CTRL);
ret = clk_fracn_gppll_wait_lock(pll);
if (ret)
return ret;
+ val |= CLKMUX_EN;
+ writel_relaxed(val, pll->base + PLL_CTRL);
+
val &= ~CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL);
@@ -335,7 +350,7 @@ static const struct clk_ops clk_fracn_gppll_ops = {
.unprepare = clk_fracn_gppll_unprepare,
.is_prepared = clk_fracn_gppll_is_prepared,
.recalc_rate = clk_fracn_gppll_recalc_rate,
- .round_rate = clk_fracn_gppll_round_rate,
+ .determine_rate = clk_fracn_gppll_determine_rate,
.set_rate = clk_fracn_gppll_set_rate,
};
diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c
index 77342893bb71..7017e9d4e188 100644
--- a/drivers/clk/imx/clk-gate-exclusive.c
+++ b/drivers/clk/imx/clk-gate-exclusive.c
@@ -18,7 +18,7 @@
* gate clock
*
* The imx exclusive gate clock is a subclass of basic clk_gate
- * with an addtional mask to indicate which other gate bits in the same
+ * with an additional mask to indicate which other gate bits in the same
* register is mutually exclusive to this gate clock.
*/
struct clk_gate_exclusive {
diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
index b82044911603..9c5f489b3975 100644
--- a/drivers/clk/imx/clk-imx5.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -454,7 +454,7 @@ static void __init mx51_clocks_init(struct device_node *np)
* longer supported. Set to one for better power saving.
*
* The effect of not setting these bits is that MIPI clocks can't be
- * enabled without the IPU clock being enabled aswell.
+ * enabled without the IPU clock being enabled as well.
*/
val = readl(MXC_CCM_CCDR);
val |= 1 << 18;
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index f9394e94f69d..05c7a82b751f 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
imx_register_uart_clocks();
}
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 2b77d1fc7bb9..99adc55e3f5d 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -498,14 +498,14 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE | CLK_SET_RATE_PARENT);
hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel), CLK_SET_PARENT_GATE);
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index 1bdb480cc96c..790f7e44b11e 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -22,7 +22,7 @@
* struct clk_imx_acm_pm_domains - structure for multi power domain
* @pd_dev: power domain device
* @pd_dev_link: power domain device link
- * @num_domains: power domain nummber
+ * @num_domains: power domain number
*/
struct clk_imx_acm_pm_domains {
struct device **pd_dev;
@@ -54,10 +54,12 @@ struct clk_imx8_acm_sel {
* struct imx8_acm_soc_data - soc specific data
* @sels: pointer to struct clk_imx8_acm_sel
* @num_sels: numbers of items
+ * @mclk_sels: pointer to imx8qm/qxp/dxl_mclk_sels
*/
struct imx8_acm_soc_data {
struct clk_imx8_acm_sel *sels;
unsigned int num_sels;
+ struct clk_parent_data *mclk_sels;
};
/**
@@ -111,11 +113,14 @@ static const struct clk_parent_data imx8qm_mclk_out_sels[] = {
{ .fw_name = "sai6_rx_bclk" },
};
-static const struct clk_parent_data imx8qm_mclk_sels[] = {
+#define ACM_AUD_CLK0_SEL_INDEX 2
+#define ACM_AUD_CLK1_SEL_INDEX 3
+
+static struct clk_parent_data imx8qm_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static const struct clk_parent_data imx8qm_asrc_mux_clk_sels[] = {
@@ -176,11 +181,11 @@ static const struct clk_parent_data imx8qxp_mclk_out_sels[] = {
{ .fw_name = "sai4_rx_bclk" },
};
-static const struct clk_parent_data imx8qxp_mclk_sels[] = {
+static struct clk_parent_data imx8qxp_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static struct clk_imx8_acm_sel imx8qxp_sels[] = {
@@ -228,11 +233,11 @@ static const struct clk_parent_data imx8dxl_mclk_out_sels[] = {
{ .index = -1 },
};
-static const struct clk_parent_data imx8dxl_mclk_sels[] = {
+static struct clk_parent_data imx8dxl_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static struct clk_imx8_acm_sel imx8dxl_sels[] = {
@@ -289,9 +294,9 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(dev_pm->pd_dev_link[i])) {
+ if (!dev_pm->pd_dev_link[i]) {
dev_pm_domain_detach(dev_pm->pd_dev[i], false);
- ret = PTR_ERR(dev_pm->pd_dev_link[i]);
+ ret = -EINVAL;
goto detach_pm;
}
}
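The fix above corrects an API mismatch: device_link_add() signals failure by returning NULL, not an ERR_PTR, so the previous IS_ERR()/PTR_ERR() pair could never trigger and the caller must supply its own errno. The corrected pattern (consumer/supplier names here are illustrative):

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
			       DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_pm_domain_detach(supplier, false);
		return -EINVAL;	/* NULL means failure; pick an errno ourselves */
	}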
@@ -375,6 +380,18 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX_ADMA_ACM_CLK_END);
goto err_clk_register;
}
+
+ /*
+ * The IMX_ADMA_ACM_AUD_CLK0_SEL and IMX_ADMA_ACM_AUD_CLK1_SEL are
+ * registered first. After registration, update the clk_hw pointers
+ * in the imx8qm/qxp/dxl_mclk_sels structures.
+ */
+ if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK0_SEL)
+ priv->soc_data->mclk_sels[ACM_AUD_CLK0_SEL_INDEX].hw =
+ hws[IMX_ADMA_ACM_AUD_CLK0_SEL];
+ if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK1_SEL)
+ priv->soc_data->mclk_sels[ACM_AUD_CLK1_SEL_INDEX].hw =
+ hws[IMX_ADMA_ACM_AUD_CLK1_SEL];
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
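The imx8-acm rework above swaps two parents from firmware-name lookups to direct clk_hw pointers: a struct clk_parent_data entry may identify its parent by .fw_name, .name, .index or .hw, and a populated .hw is used directly, sparing a lookup for clocks the driver has just registered itself. Distilled from the hunks above:

	static struct clk_parent_data mclk_sels[] = {
		{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
		{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
		{ },	/* .hw patched in at probe time */
		{ },	/* .hw patched in at probe time */
	};

	/* in probe, once the sel clocks exist: */
	mclk_sels[ACM_AUD_CLK0_SEL_INDEX].hw = hws[IMX_ADMA_ACM_AUD_CLK0_SEL];
	mclk_sels[ACM_AUD_CLK1_SEL_INDEX].hw = hws[IMX_ADMA_ACM_AUD_CLK1_SEL];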
@@ -406,16 +423,19 @@ static void imx8_acm_clk_remove(struct platform_device *pdev)
static const struct imx8_acm_soc_data imx8qm_acm_data = {
.sels = imx8qm_sels,
.num_sels = ARRAY_SIZE(imx8qm_sels),
+ .mclk_sels = imx8qm_mclk_sels,
};
static const struct imx8_acm_soc_data imx8qxp_acm_data = {
.sels = imx8qxp_sels,
.num_sels = ARRAY_SIZE(imx8qxp_sels),
+ .mclk_sels = imx8qxp_mclk_sels,
};
static const struct imx8_acm_soc_data imx8dxl_acm_data = {
.sels = imx8dxl_sels,
.num_sels = ARRAY_SIZE(imx8dxl_sels),
+ .mclk_sels = imx8dxl_mclk_sels,
};
static const struct of_device_id imx8_acm_match[] = {
@@ -468,7 +488,7 @@ static struct platform_driver imx8_acm_clk_driver = {
.pm = &imx8_acm_pm_ops,
},
.probe = imx8_acm_clk_probe,
- .remove_new = imx8_acm_clk_remove,
+ .remove = imx8_acm_clk_remove,
};
module_platform_driver(imx8_acm_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 075f643e3f35..342049b847b9 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -432,7 +432,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
- hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 4bd1ed11353b..ab77e148e70c 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -583,6 +583,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+ hws[IMX8MN_CLK_SAI7_IPG] = imx_clk_hw_gate2_shared2("sai7_ipg_clk", "ipg_audio_root", base + 0x4650, 0, &share_count_sai7);
hws[IMX8MN_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
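
The new IMX8MN_CLK_SAI7_IPG gate sits on the same CCGR register and bit as the SAI7 root gate, with share_count_sai7 refcounting the users so the hardware bit is only set on the first enable and cleared on the last disable. A standalone sketch of that refcounting, with a plain variable standing in for the CCGR bit:

#include <stdio.h>

static unsigned int share_count_sai7;
static unsigned int ccgr_bit;	/* models the shared gate bit */

static void gate_enable(unsigned int *share)
{
	if ((*share)++ == 0)
		ccgr_bit = 1;	/* first user sets the bit */
}

static void gate_disable(unsigned int *share)
{
	if (--(*share) == 0)
		ccgr_bit = 0;	/* last user clears it */
}

int main(void)
{
	gate_enable(&share_count_sai7);		/* sai7 root clk */
	gate_enable(&share_count_sai7);		/* sai7 ipg clk */
	gate_disable(&share_count_sai7);	/* ipg off, root still on */
	printf("bit after one disable: %u\n", ccgr_bit);	/* 1 */
	gate_disable(&share_count_sai7);
	printf("bit after both off:    %u\n", ccgr_bit);	/* 0 */
	return 0;
}
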
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b381d6f784c8..131702f2c9ec 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -13,6 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/imx8mp-clock.h>
@@ -154,6 +156,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
PDM_SEL, 2, 0 \
}
+#define CLK_GATE_PARENT(gname, cname, pname) \
+ { \
+ gname"_cg", \
+ IMX8MP_CLK_AUDIOMIX_##cname, \
+ { .fw_name = pname, .name = pname }, NULL, 1, \
+ CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
+ 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
+ }
+
struct clk_imx8mp_audiomix_sel {
const char *name;
int clkid;
@@ -169,16 +180,16 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("asrc", ASRC_IPG),
CLK_GATE("pdm", PDM_IPG),
CLK_GATE("earc", EARC_IPG),
- CLK_GATE("ocrama", OCRAMA_IPG),
+ CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"),
CLK_GATE("aud2htx", AUD2HTX_IPG),
- CLK_GATE("earc_phy", EARC_PHY),
+ CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
- CLK_GATE("dsp", DSP_ROOT),
- CLK_GATE("dspdbg", DSPDBG_ROOT),
+ CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"),
+ CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"),
CLK_GATE("edma", EDMA_ROOT),
- CLK_GATE("audpll", AUDPLL_ROOT),
+ CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),
CLK_GATE("mu3", MU3_ROOT),
CLK_PDM,
@@ -217,6 +228,33 @@ struct clk_imx8mp_audiomix_priv {
struct clk_hw_onecell_data clk_data;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
+
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
+{
+ struct auxiliary_device *adev;
+
+ if (!of_property_present(dev->of_node, "#reset-cells"))
+ return 0;
+
+ adev = devm_auxiliary_device_create(dev, "reset", NULL);
+ if (!adev)
+ return -ENODEV;
+
+ return 0;
+}
+
+#else /* !CONFIG_RESET_CONTROLLER */
+
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_RESET_CONTROLLER */
+
static void clk_imx8mp_audiomix_save_restore(struct device *dev, bool save)
{
struct clk_imx8mp_audiomix_priv *priv = dev_get_drvdata(dev);
@@ -269,12 +307,12 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(sels); i++) {
if (sels[i].num_parents == 1) {
hw = devm_clk_hw_register_gate_parent_data(dev,
- sels[i].name, &sels[i].parent, 0,
+ sels[i].name, &sels[i].parent, CLK_SET_RATE_PARENT,
base + sels[i].reg, sels[i].shift, 0, NULL);
} else {
hw = devm_clk_hw_register_mux_parent_data_table(dev,
sels[i].name, sels[i].parents,
- sels[i].num_parents, 0,
+ sels[i].num_parents, CLK_SET_RATE_PARENT,
base + sels[i].reg,
sels[i].shift, sels[i].width,
0, NULL, NULL);
@@ -317,7 +355,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass",
- 0, base + SAI_PLL_GNRL_CTL, 13,
+ CLK_SET_RATE_PARENT,
+ base + SAI_PLL_GNRL_CTL, 13,
0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
@@ -326,7 +365,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2",
- "sai_pll_out", 0, 1, 2);
+ "sai_pll_out",
+ CLK_SET_RATE_PARENT, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_clk_register;
@@ -337,6 +377,10 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
if (ret)
goto err_clk_register;
+ ret = clk_imx8mp_audiomix_reset_controller_register(dev, priv);
+ if (ret)
+ goto err_clk_register;
+
pm_runtime_put_sync(dev);
return 0;
@@ -380,7 +424,7 @@ MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match);
static struct platform_driver clk_imx8mp_audiomix_driver = {
.probe = clk_imx8mp_audiomix_probe,
- .remove_new = clk_imx8mp_audiomix_remove,
+ .remove = clk_imx8mp_audiomix_remove,
.driver = {
.name = "imx8mp-audio-blk-ctrl",
.of_match_table = clk_imx8mp_audiomix_of_match,
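
The CLK_GATE_PARENT() macro added above derives the gate's register and bit from the clock id alone: ids below 32 land in CLKEN0 and the rest in CLKEN1 (CLKEN0 + 4 bytes), at bit position id % 32, with !! clamping the register multiplier to 0 or 1. A standalone model of that arithmetic; the CLKEN0 offset used here is an assumed placeholder, not the driver's real value:

#include <stdio.h>

#define CLKEN0 0x40	/* illustrative base offset */

int main(void)
{
	unsigned int ids[] = { 5, 31, 32, 40 };
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		unsigned int reg = CLKEN0 + 4 * !!(ids[i] / 32);
		unsigned int bit = ids[i] % 32;

		printf("clkid %2u -> reg 0x%02x, bit %2u\n", ids[i], reg, bit);
	}
	return 0;
}
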
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 670aa2bab301..fe6dac70f1a1 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/units.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -399,17 +400,158 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
"dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
- "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
- "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+ "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
+ "sys_pll3_out", "dummy", "dummy", "osc_24m",
+ "dummy", "osc_32k"};
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
+struct imx8mp_clock_constraints {
+ unsigned int clkid;
+ u32 maxrate;
+};
+
+/*
+ * The tables below are taken from IMX8MPCEC Rev. 2.1, 07/2023,
+ * Table 13 ("Maximum frequency of modules"). Entries where a
+ * probable datasheet typo was fixed are marked with a comment.
+ */
+static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
+ { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
+ { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AHB, 133333333 },
+ { IMX8MP_CLK_IPG_ROOT, 66666667 },
+ { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
+ { IMX8MP_CLK_DRAM_ALT, 666666667 },
+ { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
+ { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ},
+ { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
+ { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
+{
+ const struct imx8mp_clock_constraints *constr;
+
+ for (constr = constraints; constr->clkid; constr++)
+ clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
+}
+
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
void __iomem *anatop_base, *ccm_base;
+ const char *opmode;
int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
@@ -547,12 +689,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
- hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300);
+ hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300, CLK_SET_RATE_PARENT);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
- hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+ hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+ hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
@@ -609,7 +751,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
- hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00);
+ hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00, CLK_SET_RATE_PARENT);
hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00);
hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80);
@@ -714,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
+ imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
+
+ err = of_property_read_string(np, "fsl,operating-mode", &opmode);
+ if (!err) {
+ if (!strcmp(opmode, "nominal"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
+ else if (!strcmp(opmode, "overdrive"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
+ }
+
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
dev_err(dev, "failed to register hws for i.MX8MP\n");
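
The constraint tables are sentinel-terminated: the walk in imx8mp_clocks_apply_constraints() stops at the first entry with clkid 0, which is safe because id 0 is the dummy clock and never needs a ceiling. The common table is always applied, then the nominal or overdrive table adjusts individual clocks on top, depending on the DT "fsl,operating-mode" string. A minimal userspace model of that layering, with an array write standing in for clk_hw_set_rate_range() and illustrative ids and rates:

#include <stdio.h>
#include <string.h>

struct constraint { unsigned int clkid; unsigned int max_mhz; };

static unsigned int ceiling[16];

static void apply(const struct constraint *c)
{
	for (; c->clkid; c++)
		ceiling[c->clkid] = c->max_mhz;	/* clk_hw_set_rate_range() */
}

int main(void)
{
	static const struct constraint common[] = { { 1, 400 }, { 2, 600 }, { 0, 0 } };
	static const struct constraint overdrive[] = { { 2, 800 }, { 0, 0 } };
	const char *opmode = "overdrive";	/* from "fsl,operating-mode" */

	apply(common);
	if (!strcmp(opmode, "overdrive"))
		apply(overdrive);

	printf("clk 1: %u MHz, clk 2: %u MHz\n", ceiling[1], ceiling[2]); /* 400, 800 */
	return 0;
}
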
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index d0ccaa040225..1dae3410ee99 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -267,7 +267,6 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
if (ret)
goto unreg;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 7d8883916cac..3ae162625bb1 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -71,7 +71,7 @@ static const char *const lvds0_sels[] = {
"clk_dummy",
"clk_dummy",
"clk_dummy",
- "mipi0_lvds_bypass_clk",
+ "lvds0_bypass_clk",
};
static const char *const lvds1_sels[] = {
@@ -79,7 +79,7 @@ static const char *const lvds1_sels[] = {
"clk_dummy",
"clk_dummy",
"clk_dummy",
- "mipi1_lvds_bypass_clk",
+ "lvds1_bypass_clk",
};
static const char * const mipi_sels[] = {
@@ -90,6 +90,22 @@ static const char * const mipi_sels[] = {
"clk_dummy",
};
+static const char * const mipi0_phy_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "mipi_pll_div2_clk",
+ "clk_dummy",
+ "mipi0_bypass_clk",
+};
+
+static const char * const mipi1_phy_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "mipi_pll_div2_clk",
+ "clk_dummy",
+ "mipi1_bypass_clk",
+};
+
static const char * const lcd_sels[] = {
"clk_dummy",
"clk_dummy",
@@ -170,8 +186,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
/* Audio SS */
imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
@@ -206,42 +222,41 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
/* Display controller SS */
- imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
/* MIPI-LVDS SS */
imx_clk_scu("mipi0_bypass_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu("mipi0_pixel_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("mipi0_lvds_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
- imx_clk_scu2("mipi0_lvds_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
+ imx_clk_scu2("mipi0_pixel_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("lvds0_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lvds0_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu2("lvds0_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
imx_clk_scu2("mipi0_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_MST_BUS);
imx_clk_scu2("mipi0_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_SLV_BUS);
- imx_clk_scu2("mipi0_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
+ imx_clk_scu2("mipi0_dsi_phy_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi0_pwm0_clk", IMX_SC_R_MIPI_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("mipi1_bypass_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu("mipi1_pixel_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
- imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("mipi1_lvds_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
- imx_clk_scu2("mipi1_lvds_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
-
+ imx_clk_scu2("mipi1_pixel_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("lvds1_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lvds1_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu2("lvds1_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
imx_clk_scu2("mipi1_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_MST_BUS);
imx_clk_scu2("mipi1_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_SLV_BUS);
- imx_clk_scu2("mipi1_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY);
+ imx_clk_scu2("mipi1_dsi_phy_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY);
imx_clk_scu("mipi1_i2c0_clk", IMX_SC_R_MIPI_1_I2C_0, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi1_i2c1_clk", IMX_SC_R_MIPI_1_I2C_1, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi1_pwm0_clk", IMX_SC_R_MIPI_1_PWM_0, IMX_SC_PM_CLK_PER);
diff --git a/drivers/clk/imx/clk-imx8ulp-sim-lpav.c b/drivers/clk/imx/clk-imx8ulp-sim-lpav.c
new file mode 100644
index 000000000000..990c95b89b75
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8ulp-sim-lpav.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <dt-bindings/clock/imx8ulp-clock.h>
+
+#include <linux/auxiliary_bus.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SYSCTRL0 0x8
+
+#define IMX8ULP_HIFI_CLK_GATE(gname, cname, pname, bidx) \
+ { \
+ .name = gname "_cg", \
+ .id = IMX8ULP_CLK_SIM_LPAV_HIFI_##cname, \
+ .parent = { .fw_name = pname }, \
+ .bit = bidx, \
+ }
+
+struct clk_imx8ulp_sim_lpav_data {
+ spinlock_t lock; /* shared by MUX, clock gate and reset */
+ unsigned long flags; /* for spinlock usage */
+ struct clk_hw_onecell_data clk_data; /* keep last */
+};
+
+struct clk_imx8ulp_sim_lpav_gate {
+ const char *name;
+ int id;
+ const struct clk_parent_data parent;
+ u8 bit;
+};
+
+static struct clk_imx8ulp_sim_lpav_gate gates[] = {
+ IMX8ULP_HIFI_CLK_GATE("hifi_core", CORE, "core", 17),
+ IMX8ULP_HIFI_CLK_GATE("hifi_pbclk", PBCLK, "bus", 18),
+ IMX8ULP_HIFI_CLK_GATE("hifi_plat", PLAT, "plat", 19)
+};
+
+static void clk_imx8ulp_sim_lpav_lock(void *arg) __acquires(&data->lock)
+{
+ struct clk_imx8ulp_sim_lpav_data *data = dev_get_drvdata(arg);
+
+ spin_lock_irqsave(&data->lock, data->flags);
+}
+
+static void clk_imx8ulp_sim_lpav_unlock(void *arg) __releases(&data->lock)
+{
+ struct clk_imx8ulp_sim_lpav_data *data = dev_get_drvdata(arg);
+
+ spin_unlock_irqrestore(&data->lock, data->flags);
+}
+
+static int clk_imx8ulp_sim_lpav_probe(struct platform_device *pdev)
+{
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .lock = clk_imx8ulp_sim_lpav_lock,
+ .unlock = clk_imx8ulp_sim_lpav_unlock,
+ .lock_arg = &pdev->dev,
+ };
+ struct clk_imx8ulp_sim_lpav_data *data;
+ struct auxiliary_device *adev;
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int i, ret;
+
+ data = devm_kzalloc(&pdev->dev,
+ struct_size(data, clk_data.hws, ARRAY_SIZE(gates)),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, data);
+
+ /*
+ * this lock is used directly by the clock gate and indirectly
+ * by the reset and mux controller via the regmap API
+ */
+ spin_lock_init(&data->lock);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(base),
+ "failed to ioremap base\n");
+ /*
+ * although the clock gate doesn't use the regmap API to modify the
+ * registers, we still need the regmap because of the reset auxiliary
+ * driver and the MUX drivers, which use the parent device's regmap
+ */
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "failed to initialize regmap\n");
+
+ data->clk_data.num = ARRAY_SIZE(gates);
+
+ for (i = 0; i < ARRAY_SIZE(gates); i++) {
+ hw = devm_clk_hw_register_gate_parent_data(&pdev->dev,
+ gates[i].name,
+ &gates[i].parent,
+ CLK_SET_RATE_PARENT,
+ base + SYSCTRL0,
+ gates[i].bit,
+ 0x0, &data->lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hw),
+ "failed to register %s gate\n",
+ gates[i].name);
+
+ data->clk_data.hws[i] = hw;
+ }
+
+ adev = devm_auxiliary_device_create(&pdev->dev, "reset", NULL);
+ if (!adev)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to register aux reset\n");
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev,
+ of_clk_hw_onecell_get,
+ &data->clk_data);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register clk hw provider\n");
+
+ /* used to probe MUX child device */
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id clk_imx8ulp_sim_lpav_of_match[] = {
+ { .compatible = "fsl,imx8ulp-sim-lpav" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_imx8ulp_sim_lpav_of_match);
+
+static struct platform_driver clk_imx8ulp_sim_lpav_driver = {
+ .probe = clk_imx8ulp_sim_lpav_probe,
+ .driver = {
+ .name = "clk-imx8ulp-sim-lpav",
+ .of_match_table = clk_imx8ulp_sim_lpav_of_match,
+ },
+};
+module_platform_driver(clk_imx8ulp_sim_lpav_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("i.MX8ULP LPAV System Integration Module (SIM) clock driver");
+MODULE_AUTHOR("Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>");
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index c6a9bc8ecc1f..c5f358a75f30 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -15,6 +15,11 @@
#include "clk.h"
+#define IMX93_CLK_END 208
+
+#define PLAT_IMX93 BIT(0)
+#define PLAT_IMX91 BIT(1)
+
enum clk_sel {
LOW_SPEED_IO_SEL,
NON_IO_SEL,
@@ -33,6 +38,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_mub;
static u32 share_count_pdm;
+static u32 share_count_spdif;
static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
static const char *parent_names[MAX_SEL][4] = {
@@ -53,6 +59,7 @@ static const struct imx93_clk_root {
u32 off;
enum clk_sel sel;
unsigned long flags;
+ unsigned long plat;
} root_array[] = {
/* a55/m33/bus critical clk for system run */
{ IMX93_CLK_A55_PERIPH, "a55_periph_root", 0x0000, FAST_SEL, CLK_IS_CRITICAL },
@@ -63,9 +70,9 @@ static const struct imx93_clk_root {
{ IMX93_CLK_BUS_AON, "bus_aon_root", 0x0300, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_WAKEUP_AXI, "wakeup_axi_root", 0x0380, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_SWO_TRACE, "swo_trace_root", 0x0400, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_LPTMR1, "lptmr1_root", 0x0700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_LPTMR2, "lptmr2_root", 0x0780, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_TPM2, "tpm2_root", 0x0880, TPM_SEL, },
@@ -120,15 +127,15 @@ static const struct imx93_clk_root {
{ IMX93_CLK_HSIO_ACSCAN_80M, "hsio_acscan_80m_root", 0x1f80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_HSIO_ACSCAN_480M, "hsio_acscan_480m_root", 0x2000, MISC_SEL, },
{ IMX93_CLK_NIC_AXI, "nic_axi_root", 0x2080, FAST_SEL, CLK_IS_CRITICAL, },
- { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, },
+ { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_MEDIA_AXI, "media_axi_root", 0x2280, FAST_SEL, },
{ IMX93_CLK_MEDIA_APB, "media_apb_root", 0x2300, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, },
+ { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_MEDIA_DISP_PIX, "media_disp_pix_root", 0x2400, VIDEO_SEL, },
{ IMX93_CLK_CAM_PIX, "cam_pix_root", 0x2480, VIDEO_SEL, },
- { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, },
- { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, },
+ { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_ADC, "adc_root", 0x2700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_PDM, "pdm_root", 0x2780, AUDIO_SEL, },
{ IMX93_CLK_TSTMR1, "tstmr1_root", 0x2800, LOW_SPEED_IO_SEL, },
@@ -137,13 +144,16 @@ static const struct imx93_clk_root {
{ IMX93_CLK_MQS2, "mqs2_root", 0x2980, AUDIO_SEL, },
{ IMX93_CLK_AUDIO_XCVR, "audio_xcvr_root", 0x2a00, NON_IO_SEL, },
{ IMX93_CLK_SPDIF, "spdif_root", 0x2a80, AUDIO_SEL, },
- { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, },
- { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, },
- { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX91_CLK_ENET1_QOS_TSN, "enet1_qos_tsn_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX91, },
+ { IMX91_CLK_ENET_TIMER, "enet_timer_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX91, },
+ { IMX91_CLK_ENET2_REGULAR, "enet2_regular_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX91, },
+ { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_USB_PHY_BURUNIN, "usb_phy_root", 0x2e80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_PAL_CAME_SCAN, "pal_came_scan_root", 0x2f00, MISC_SEL, }
};
@@ -155,6 +165,7 @@ static const struct imx93_clk_ccgr {
u32 off;
unsigned long flags;
u32 *shared_count;
+ unsigned long plat;
} ccgr_array[] = {
{ IMX93_CLK_A55_GATE, "a55_alt", "a55_alt_root", 0x8000, },
/* M33 critical clk for system run */
@@ -167,10 +178,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
- { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, },
@@ -178,8 +189,8 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, },
{ IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, },
{ IMX93_CLK_GPIO4_GATE, "gpio4", "bus_wakeup_root", 0x8940, },
- { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, },
- { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, },
+ { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, 0, NULL, PLAT_IMX93},
+ { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, 0, NULL, PLAT_IMX93},
{ IMX93_CLK_LPIT1_GATE, "lpit1", "bus_aon_root", 0x8a00, },
{ IMX93_CLK_LPIT2_GATE, "lpit2", "bus_wakeup_root", 0x8a40, },
{ IMX93_CLK_LPTMR1_GATE, "lptmr1", "lptmr1_root", 0x8a80, },
@@ -228,10 +239,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
- { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
- { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
+ { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, 0, NULL, PLAT_IMX93 },
+ { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
- { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
+ { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
{ IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
@@ -242,10 +253,13 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_MQS1_GATE, "mqs1", "sai1_root", 0x9b00, },
{ IMX93_CLK_MQS2_GATE, "mqs2", "sai3_root", 0x9b40, },
{ IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, },
- { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, },
+ { IMX93_CLK_SPDIF_IPG, "spdif_ipg_clk", "bus_wakeup_root", 0x9c00, 0, &share_count_spdif},
+ { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, 0, &share_count_spdif},
{ IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, },
- { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, },
- { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, },
+ { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX93, },
+ { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX93, },
+ { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
/* Critical because clk accessed during CPU idle */
{ IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, CLK_IS_CRITICAL},
{ IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, },
@@ -265,6 +279,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
const struct imx93_clk_ccgr *ccgr;
void __iomem *base, *anatop_base;
int i, ret;
+ const unsigned long plat = (unsigned long)device_get_match_data(&pdev->dev);
clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
IMX93_CLK_END), GFP_KERNEL);
@@ -314,17 +329,20 @@ static int imx93_clocks_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(root_array); i++) {
root = &root_array[i];
- clks[root->clk] = imx93_clk_composite_flags(root->name,
- parent_names[root->sel],
- 4, base + root->off, 3,
- root->flags);
+ if (!root->plat || root->plat & plat)
+ clks[root->clk] = imx93_clk_composite_flags(root->name,
+ parent_names[root->sel],
+ 4, base + root->off, 3,
+ root->flags);
}
for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
ccgr = &ccgr_array[i];
- clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name,
- ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
- ccgr->shared_count);
+ if (!ccgr->plat || ccgr->plat & plat)
+ clks[ccgr->clk] = imx93_clk_gate(NULL,
+ ccgr->name, ccgr->parent_name,
+ ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
+ ccgr->shared_count);
}
clks[IMX93_CLK_A55_SEL] = imx_clk_hw_mux2("a55_sel", base + 0x4820, 0, 1, a55_core_sels,
@@ -354,7 +372,8 @@ unregister_hws:
}
static const struct of_device_id imx93_clk_of_match[] = {
- { .compatible = "fsl,imx93-ccm" },
+ { .compatible = "fsl,imx93-ccm", .data = (void *)PLAT_IMX93 },
+ { .compatible = "fsl,imx91-ccm", .data = (void *)PLAT_IMX91 },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx93_clk_of_match);
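
The new plat field lets one table serve both SoCs: entries with a zero mask are registered everywhere, while masked entries are skipped unless their mask intersects the bits carried in the OF match data. A standalone model of the filtering with illustrative table contents:

#include <stdio.h>

#define PLAT_IMX93	(1ul << 0)
#define PLAT_IMX91	(1ul << 1)

struct root { const char *name; unsigned long plat; };

int main(void)
{
	const struct root roots[] = {
		{ "bus_wakeup_root", 0 },		/* both SoCs */
		{ "flexio1_root", PLAT_IMX93 },
		{ "enet2_regular_root", PLAT_IMX91 },
	};
	unsigned long plat = PLAT_IMX91;	/* device_get_match_data() */
	unsigned int i;

	for (i = 0; i < sizeof(roots) / sizeof(roots[0]); i++)
		if (!roots[i].plat || (roots[i].plat & plat))
			printf("register %s\n", roots[i].name);
	return 0;	/* prints bus_wakeup_root and enet2_regular_root */
}
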
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 74f595f9e5e3..56bed4471995 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
+#include <dt-bindings/clock/nxp,imx94-clock.h>
#include <dt-bindings/clock/nxp,imx95-clock.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -35,6 +36,7 @@ struct imx95_blk_ctl {
void __iomem *base;
/* clock gate register */
u32 clk_reg_restore;
+ const struct imx95_blk_ctl_dev_data *pdata;
};
struct imx95_blk_ctl_clk_dev_data {
@@ -156,7 +158,7 @@ static const struct imx95_blk_ctl_dev_data camblk_dev_data = {
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
+static const struct imx95_blk_ctl_clk_dev_data imx95_lvds_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_LVDS_PHY_DIV] = {
.name = "ldb_phy_div",
.parent_names = (const char *[]){ "ldbpll", },
@@ -213,17 +215,21 @@ static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
},
};
-static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = {
- .num_clks = ARRAY_SIZE(lvds_clk_dev_data),
- .clk_dev_data = lvds_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_lvds_clk_dev_data),
+ .clk_dev_data = imx95_lvds_clk_dev_data,
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+static const char * const imx95_disp_engine_parents[] = {
+ "videopll1", "dsi_pll", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx95_dispmix_csr_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_ENG0_SEL] = {
.name = "disp_engine0_sel",
- .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
- .num_parents = 4,
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 0,
.bit_width = 2,
@@ -232,8 +238,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
},
[IMX95_CLK_DISPMIX_ENG1_SEL] = {
.name = "disp_engine1_sel",
- .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
- .num_parents = 4,
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 2,
.bit_width = 2,
@@ -242,16 +248,108 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
}
};
-static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
- .num_clks = ARRAY_SIZE(dispmix_csr_clk_dev_data),
- .clk_dev_data = dispmix_csr_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx95_dispmix_csr_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
+static const struct imx95_blk_ctl_clk_dev_data netxmix_clk_dev_data[] = {
+ [IMX95_CLK_NETCMIX_ENETC0_RMII] = {
+ .name = "enetc0_rmii_sel",
+ .parent_names = (const char *[]){"ext_enetref", "enetref"},
+ .num_parents = 2,
+ .reg = 4,
+ .bit_idx = 5,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+ [IMX95_CLK_NETCMIX_ENETC1_RMII] = {
+ .name = "enetc1_rmii_sel",
+ .parent_names = (const char *[]){"ext_enetref", "enetref"},
+ .num_parents = 2,
+ .reg = 4,
+ .bit_idx = 10,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data netcmix_dev_data = {
+ .num_clks = ARRAY_SIZE(netxmix_clk_dev_data),
+ .clk_dev_data = netxmix_clk_dev_data,
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data hsio_blk_ctl_clk_dev_data[] = {
+ [0] = {
+ .name = "hsio_blk_ctl_clk",
+ .parent_names = (const char *[]){ "hsio_pll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 6,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct imx95_blk_ctl_dev_data hsio_blk_ctl_dev_data = {
+ .num_clks = 1,
+ .clk_dev_data = hsio_blk_ctl_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx94_lvds_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_LVDS_CLK_GATE] = {
+ .name = "lvds_clk_gate",
+ .parent_names = (const char *[]){ "ldbpll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_lvds_clk_dev_data),
+ .clk_dev_data = imx94_lvds_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
+static const char * const imx94_disp_engine_parents[] = {
+ "disppix", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx94_dispmix_csr_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_CLK_SEL] = {
+ .name = "disp_clk_sel",
+ .parent_names = imx94_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx94_disp_engine_parents),
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx94_dispmix_csr_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct imx95_blk_ctl_dev_data *bc_data;
struct imx95_blk_ctl *bc;
struct clk_hw_onecell_data *clk_hw_data;
struct clk_hw **hws;
@@ -281,23 +379,25 @@ static int imx95_bc_probe(struct platform_device *pdev)
return ret;
}
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ bc->pdata = of_device_get_match_data(dev);
+ if (!bc->pdata)
return devm_of_platform_populate(dev);
- clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks),
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc->pdata->num_clks),
GFP_KERNEL);
if (!clk_hw_data)
return -ENOMEM;
- if (bc_data->rpm_enabled)
- pm_runtime_enable(&pdev->dev);
+ if (bc->pdata->rpm_enabled) {
+ devm_pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume_and_get(&pdev->dev);
+ }
- clk_hw_data->num = bc_data->num_clks;
+ clk_hw_data->num = bc->pdata->num_clks;
hws = clk_hw_data->hws;
- for (i = 0; i < bc_data->num_clks; i++) {
- const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i];
+ for (i = 0; i < bc->pdata->num_clks; i++) {
+ const struct imx95_blk_ctl_clk_dev_data *data = &bc->pdata->clk_dev_data[i];
void __iomem *reg = base + data->reg;
if (data->type == CLK_MUX) {
@@ -331,21 +431,20 @@ static int imx95_bc_probe(struct platform_device *pdev)
goto cleanup;
}
- if (pm_runtime_enabled(bc->dev))
+ if (pm_runtime_enabled(bc->dev)) {
+ pm_runtime_put_sync(&pdev->dev);
clk_disable_unprepare(bc->clk_apb);
+ }
return 0;
cleanup:
- for (i = 0; i < bc_data->num_clks; i++) {
+ for (i = 0; i < bc->pdata->num_clks; i++) {
if (IS_ERR_OR_NULL(hws[i]))
continue;
clk_hw_unregister(hws[i]);
}
- if (bc_data->rpm_enabled)
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -354,15 +453,24 @@ static int imx95_bc_runtime_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
clk_disable_unprepare(bc->clk_apb);
+
return 0;
}
static int imx95_bc_runtime_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
- return clk_prepare_enable(bc->clk_apb);
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
+
+ return 0;
}
#endif
@@ -370,22 +478,12 @@ static int imx95_bc_runtime_resume(struct device *dev)
static int imx95_bc_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
- int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- if (bc_data->rpm_enabled) {
- ret = pm_runtime_get_sync(bc->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bc->dev);
- return ret;
- }
- }
-
- bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
+ clk_disable_unprepare(bc->clk_apb);
return 0;
}
@@ -393,16 +491,16 @@ static int imx95_bc_suspend(struct device *dev)
static int imx95_bc_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
+ int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
- if (bc_data->rpm_enabled)
- pm_runtime_put(bc->dev);
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
return 0;
}
@@ -414,11 +512,15 @@ static const struct dev_pm_ops imx95_bc_pm_ops = {
};
static const struct of_device_id imx95_bc_of_match[] = {
+ { .compatible = "nxp,imx94-display-csr", .data = &imx94_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx94-lvds-csr", .data = &imx94_lvds_csr_dev_data },
{ .compatible = "nxp,imx95-camera-csr", .data = &camblk_dev_data },
{ .compatible = "nxp,imx95-display-master-csr", },
- { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
- { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-display-csr", .data = &imx95_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-lvds-csr", .data = &imx95_lvds_csr_dev_data },
+ { .compatible = "nxp,imx95-hsio-blk-ctl", .data = &hsio_blk_ctl_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
+ { .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx95_bc_of_match);
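
The reworked PM callbacks snapshot the block control's gate register into bc->clk_reg_restore before the APB clock (and potentially the power domain) goes down, and write it back only once the bus clock is running again; runtime and system suspend now share that ordering. A condensed standalone model, with plain variables standing in for readl()/writel() and the clk calls:

#include <stdio.h>

static unsigned int hw_reg;	/* gate register inside the block control */
static unsigned int clk_reg_restore;

static void bc_suspend(void)
{
	clk_reg_restore = hw_reg;	/* readl(bc->base + clk_reg_offset) */
	/* clk_disable_unprepare(bc->clk_apb) follows */
}

static void bc_resume(void)
{
	/* clk_prepare_enable(bc->clk_apb) must succeed first */
	hw_reg = clk_reg_restore;	/* writel(bc->clk_reg_restore, ...) */
}

int main(void)
{
	hw_reg = 0x3f;		/* gates enabled before suspend */
	bc_suspend();
	hw_reg = 0;		/* power-domain off wipes the register */
	bc_resume();
	printf("restored: 0x%x\n", hw_reg);	/* 0x3f */
	return 0;
}
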
diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
index 08d155feb035..efd1ac9d8eeb 100644
--- a/drivers/clk/imx/clk-imxrt1050.c
+++ b/drivers/clk/imx/clk-imxrt1050.c
@@ -176,6 +176,7 @@ static struct platform_driver imxrt1050_clk_driver = {
};
module_platform_driver(imxrt1050_clk_driver);
+MODULE_DESCRIPTION("NXP i.MX RT1050 clock driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jesse Taube <Mr.Bossman075@gmail.com>");
MODULE_AUTHOR("Giulio Benetti <giulio.benetti@benettiengineering.com>");
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
index dd5abd09f3e2..6376557a3c3d 100644
--- a/drivers/clk/imx/clk-lpcg-scu.c
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -6,10 +6,12 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#include "clk-scu.h"
@@ -41,6 +43,29 @@ struct clk_lpcg_scu {
#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
+/* e10858 - LPCG clock gating register synchronization errata */
+static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val)
+{
+ writel(val, reg);
+
+ if (rate >= 24 * HZ_PER_MHZ || rate == 0) {
+ /*
+ * The time taken to access the LPCG registers from the AP core
+ * through the interconnect is longer than the minimum delay
+ * of 4 clock cycles required by the errata.
+ * Adding a readl will provide sufficient delay to prevent
+ * back-to-back writes.
+ */
+ readl(reg);
+ } else {
+ /*
+ * For clocks running below 24MHz, wait a minimum of
+ * 4 clock cycles.
+ */
+ ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate)));
+ }
+}
+
static int clk_lpcg_scu_enable(struct clk_hw *hw)
{
struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
@@ -57,7 +82,8 @@ static int clk_lpcg_scu_enable(struct clk_hw *hw)
val |= CLK_GATE_SCU_LPCG_HW_SEL;
reg |= val << clk->bit_idx;
- writel(reg, clk->reg);
+
+ lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
@@ -74,7 +100,7 @@ static void clk_lpcg_scu_disable(struct clk_hw *hw)
reg = readl_relaxed(clk->reg);
reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
- writel(reg, clk->reg);
+ lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
}
@@ -135,6 +161,9 @@ static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
{
struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+ if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg")))
+ return 0;
+
clk->state = readl_relaxed(clk->reg);
dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
@@ -145,13 +174,11 @@ static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
{
struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
- /*
- * FIXME: Sometimes writes don't work unless the CPU issues
- * them twice
- */
+ if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg")))
+ return 0;
writel(clk->state, clk->reg);
- writel(clk->state, clk->reg);
+ lpcg_e10858_writel(0, clk->reg, clk->state);
dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
return 0;
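
The e10858 workaround needs at least four cycles of the gated clock between back-to-back LPCG writes. DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate) is one clock period in nanoseconds, so four times that is the explicit wait for slow clocks, while at 24 MHz and above (or when the rate is unknown/zero) the readl() back across the interconnect already outlasts the four cycles. A standalone worked example of the same arithmetic:

#include <stdio.h>

#define HZ_PER_MHZ		1000000ul
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rates[] = { 32768, 8 * HZ_PER_MHZ, 24 * HZ_PER_MHZ };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (rates[i] >= 24 * HZ_PER_MHZ || rates[i] == 0)
			printf("%9lu Hz: readl() is delay enough\n", rates[i]);
		else	/* 32768 Hz -> 122072 ns, 8 MHz -> 500 ns */
			printf("%9lu Hz: ndelay(%lu)\n", rates[i],
			       4 * DIV_ROUND_UP(1000 * HZ_PER_MHZ, rates[i]));
	}
	return 0;
}
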
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 5cf0149dfa15..31220fa7882b 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -62,24 +62,26 @@ static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 tmp = *prate;
+ u64 tmp = req->best_parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = tmp;
if (frac < 12)
frac = 12;
else if (frac > 35)
frac = 35;
- tmp = *prate;
+ tmp = req->best_parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -117,7 +119,7 @@ static const struct clk_ops clk_pfd_ops = {
.enable = clk_pfd_enable,
.disable = clk_pfd_disable,
.recalc_rate = clk_pfd_recalc_rate,
- .round_rate = clk_pfd_round_rate,
+ .determine_rate = clk_pfd_determine_rate,
.set_rate = clk_pfd_set_rate,
.is_enabled = clk_pfd_is_enabled,
};
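
The PFD arithmetic is unchanged by the determine_rate() conversion: frac = round(18 * parent / rate), clamped to [12, 35], and the granted rate is parent * 18 / frac. A standalone worked example with a 480 MHz parent and a 300 MHz request, where frac comes out to 29 and the deliverable rate to ~297.93 MHz:

#include <stdio.h>

int main(void)
{
	unsigned long long parent = 480000000ull, rate = 300000000ull;
	unsigned long long tmp = parent * 18 + rate / 2;
	unsigned int frac;

	tmp /= rate;		/* do_div() in the kernel */
	frac = tmp;		/* (8640 + 150) / 300 = 29 */
	if (frac < 12)
		frac = 12;
	else if (frac > 35)
		frac = 35;

	tmp = parent * 18;
	tmp /= frac;
	printf("frac=%u, rate=%llu Hz\n", frac, tmp);	/* frac=29, 297931034 */
	return 0;
}
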
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index d63564dbb12c..36d0e80b55b8 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -56,7 +56,9 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
PLL_1416X_RATE(700000000U, 350, 3, 2),
PLL_1416X_RATE(640000000U, 320, 3, 2),
PLL_1416X_RATE(600000000U, 300, 3, 2),
+ PLL_1416X_RATE(416000000U, 208, 3, 2),
PLL_1416X_RATE(320000000U, 160, 3, 2),
+ PLL_1416X_RATE(208000000U, 208, 3, 3),
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
@@ -214,8 +216,8 @@ found:
t->mdiv, t->kdiv);
}
-static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1416x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
@@ -223,22 +225,29 @@ static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
-static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1443x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
struct imx_pll14xx_rate_table t;
- imx_pll14xx_calc_settings(pll, rate, *prate, &t);
+ imx_pll14xx_calc_settings(pll, req->rate, req->best_parent_rate, &t);
+
+ req->rate = t.rate;
- return t.rate;
+ return 0;
}
static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
@@ -468,7 +477,7 @@ static const struct clk_ops clk_pll1416x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1416x_round_rate,
+ .determine_rate = clk_pll1416x_determine_rate,
.set_rate = clk_pll1416x_set_rate,
};
@@ -481,7 +490,7 @@ static const struct clk_ops clk_pll1443x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1443x_round_rate,
+ .determine_rate = clk_pll1443x_determine_rate,
.set_rate = clk_pll1443x_set_rate,
};
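
clk_pll1416x_determine_rate() keeps the descending-table scan: the first entry at or below the request is granted, and anything below the table falls back to the minimum supported rate. With a 24 MHz reference, the two new entries are exact hits (24 MHz * 208 / (3 * 2^sdiv) gives 416 MHz for sdiv = 2 and 208 MHz for sdiv = 3). A standalone sketch of the lookup over a trimmed, illustrative table:

#include <stdio.h>

static const unsigned long tbl[] = {	/* descending */
	600000000, 416000000, 320000000, 208000000,
};

static unsigned long pick(unsigned long req)
{
	unsigned int i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (req >= tbl[i])
			return tbl[i];
	return tbl[sizeof(tbl) / sizeof(tbl[0]) - 1];	/* table minimum */
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       pick(416000000), pick(300000000), pick(100000000));
	/* 416000000 208000000 208000000 */
	return 0;
}
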
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index ff17f0664faa..bb497ad5e0ae 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -178,18 +178,25 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 dp_op, dp_mfd, dp_mfn;
int ret;
- ret = __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
- if (ret)
- return ret;
+ ret = __clk_pllv2_set_rate(req->rate, req->best_parent_rate, &dp_op,
+ &dp_mfd, &dp_mfn);
+ if (ret) {
+ req->rate = ret;
- return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
- dp_op, dp_mfd, dp_mfn);
+ return 0;
+ }
+
+ req->rate = __clk_pllv2_recalc_rate(req->best_parent_rate,
+ MXC_PLL_DP_CTL_DPDCK0_2_EN, dp_op,
+ dp_mfd, dp_mfn);
+
+ return 0;
}
static int clk_pllv2_prepare(struct clk_hw *hw)
@@ -235,7 +242,7 @@ static const struct clk_ops clk_pllv2_ops = {
.prepare = clk_pllv2_prepare,
.unprepare = clk_pllv2_unprepare,
.recalc_rate = clk_pllv2_recalc_rate,
- .round_rate = clk_pllv2_round_rate,
+ .determine_rate = clk_pllv2_determine_rate,
.set_rate = clk_pllv2_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 11fb238ee8f0..b99508367bcb 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -117,13 +117,14 @@ static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
return (div == 1) ? parent_rate * 22 : parent_rate * 20;
}
-static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
- return (rate >= parent_rate * 22) ? parent_rate * 22 :
- parent_rate * 20;
+ req->rate = (req->rate >= parent_rate * 22) ? parent_rate * 22 : parent_rate * 20;
+
+ return 0;
}
static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -152,7 +153,7 @@ static const struct clk_ops clk_pllv3_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_recalc_rate,
- .round_rate = clk_pllv3_round_rate,
+ .determine_rate = clk_pllv3_determine_rate,
.set_rate = clk_pllv3_set_rate,
};
@@ -165,21 +166,23 @@ static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
return parent_rate * div / 2;
}
-static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_sys_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 54 / 2;
unsigned long max_rate = parent_rate * 108 / 2;
u32 div;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
- div = rate * 2 / parent_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
+ div = req->rate * 2 / parent_rate;
- return parent_rate * div / 2;
+ req->rate = parent_rate * div / 2;
+
+ return 0;
}
static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +210,7 @@ static const struct clk_ops clk_pllv3_sys_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_sys_recalc_rate,
- .round_rate = clk_pllv3_sys_round_rate,
+ .determine_rate = clk_pllv3_sys_determine_rate,
.set_rate = clk_pllv3_sys_set_rate,
};
@@ -226,10 +229,10 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
return parent_rate * div + (unsigned long)temp64;
}
-static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_av_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
u32 div;
@@ -237,16 +240,16 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
if (parent_rate <= max_mfd)
mfd = parent_rate;
- div = rate / parent_rate;
- temp64 = (u64) (rate - div * parent_rate);
+ div = req->rate / parent_rate;
+ temp64 = (u64) (req->rate - div * parent_rate);
temp64 *= mfd;
temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
@@ -255,7 +258,9 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= mfn;
do_div(temp64, mfd);
- return parent_rate * div + (unsigned long)temp64;
+ req->rate = parent_rate * div + (unsigned long)temp64;
+
+ return 0;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -296,7 +301,7 @@ static const struct clk_ops clk_pllv3_av_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_av_recalc_rate,
- .round_rate = clk_pllv3_av_round_rate,
+ .determine_rate = clk_pllv3_av_determine_rate,
.set_rate = clk_pllv3_av_set_rate,
};
@@ -355,12 +360,15 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
}
-static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_vf610_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate);
+ struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(req->best_parent_rate,
+ req->rate);
+
+ req->rate = clk_pllv3_vf610_mf_to_rate(req->best_parent_rate, mf);
- return clk_pllv3_vf610_mf_to_rate(*prate, mf);
+ return 0;
}
static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -389,7 +397,7 @@ static const struct clk_ops clk_pllv3_vf610_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_vf610_recalc_rate,
- .round_rate = clk_pllv3_vf610_round_rate,
+ .determine_rate = clk_pllv3_vf610_determine_rate,
.set_rate = clk_pllv3_vf610_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 9b136c951762..01d05b5d5438 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -95,11 +95,11 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
return (parent_rate * mult) + (u32)temp64;
}
-static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv4_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long round_rate, i;
u32 mfn, mfd = DEFAULT_MFD;
bool found = false;
@@ -107,7 +107,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mult;
if (pll->use_mult_range) {
- temp64 = (u64)rate;
+	temp64 = (u64)req->rate;
do_div(temp64, parent_rate);
mult = temp64;
if (mult >= pllv4_mult_range[1] &&
@@ -118,7 +118,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
} else {
for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
round_rate = parent_rate * pllv4_mult_table[i];
- if (rate >= round_rate) {
+ if (req->rate >= round_rate) {
found = true;
break;
}
@@ -127,14 +127,16 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
if (!found) {
pr_warn("%s: unable to round rate %lu, parent rate %lu\n",
- clk_hw_get_name(hw), rate, parent_rate);
+ clk_hw_get_name(hw), req->rate, parent_rate);
+ req->rate = 0;
+
return 0;
}
if (parent_rate <= MAX_MFD)
mfd = parent_rate;
- temp64 = (u64)(rate - round_rate);
+ temp64 = (u64)(req->rate - round_rate);
temp64 *= mfd;
do_div(temp64, parent_rate);
mfn = temp64;
@@ -145,14 +147,19 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
* pair of mfn/mfd, we simply return the round_rate without using
* the frac part.
*/
- if (mfn >= mfd)
- return round_rate;
+ if (mfn >= mfd) {
+ req->rate = round_rate;
+
+ return 0;
+ }
temp64 = (u64)parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
- return round_rate + (u32)temp64;
+ req->rate = round_rate + (u32)temp64;
+
+ return 0;
}
static bool clk_pllv4_is_valid_mult(struct clk_pllv4 *pll, unsigned int mult)
@@ -229,7 +236,7 @@ static void clk_pllv4_unprepare(struct clk_hw *hw)
static const struct clk_ops clk_pllv4_ops = {
.recalc_rate = clk_pllv4_recalc_rate,
- .round_rate = clk_pllv4_round_rate,
+ .determine_rate = clk_pllv4_determine_rate,
.set_rate = clk_pllv4_set_rate,
.prepare = clk_pllv4_prepare,
.unprepare = clk_pllv4_unprepare,
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b1dd0c08e091..34c9dc1fb20e 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -269,24 +269,6 @@ static int clk_scu_determine_rate(struct clk_hw *hw,
return 0;
}
-/*
- * clk_scu_round_rate - Round clock rate for a SCU clock
- * @hw: clock to round rate for
- * @rate: rate to round
- * @parent_rate: parent rate provided by common clock framework, not used
- *
- * Returns the current clock rate, or zero in failure.
- */
-static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- /*
- * Assume we support all the requested rate and let the SCU firmware
- * to handle the left work
- */
- return rate;
-}
-
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -454,7 +436,7 @@ static const struct clk_ops clk_scu_ops = {
static const struct clk_ops clk_scu_cpu_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_atf_set_cpu_rate,
.prepare = clk_scu_prepare,
.unprepare = clk_scu_unprepare,
@@ -462,7 +444,7 @@ static const struct clk_ops clk_scu_cpu_ops = {
static const struct clk_ops clk_scu_pi_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_set_rate,
};
@@ -567,7 +549,6 @@ static int imx_clk_scu_probe(struct platform_device *pdev)
if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
(clk->rsrc == IMX_SC_R_A72))) {
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
}
@@ -596,7 +577,7 @@ static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
else
clk->rate = clk_hw_get_rate(&clk->hw);
- clk->is_enabled = clk_hw_is_enabled(&clk->hw);
+ clk->is_enabled = clk_hw_is_prepared(&clk->hw);
if (clk->parent)
dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
@@ -729,7 +710,7 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
if (ret)
goto put_device;
- /* For API backwards compatiblilty, simply return NULL for success */
+ /* For API backwards compatibility, simply return NULL for success */
return NULL;
put_device:
@@ -766,15 +747,15 @@ static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
return err ? 0 : rate;
}
-static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_gpr_div_scu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < *prate)
- rate = *prate / 2;
+ if (req->rate < req->best_parent_rate)
+ req->rate = req->best_parent_rate / 2;
else
- rate = *prate;
+ req->rate = req->best_parent_rate;
- return rate;
+ return 0;
}
static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -793,7 +774,7 @@ static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_gpr_div_scu_ops = {
.recalc_rate = clk_gpr_div_scu_recalc_rate,
- .round_rate = clk_gpr_div_scu_round_rate,
+ .determine_rate = clk_gpr_div_scu_determine_rate,
.set_rate = clk_gpr_div_scu_set_rate,
};
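
With the pass-through clk_scu_round_rate() deleted, the cpu and pi ops tables above reuse the driver's existing clk_scu_determine_rate(); the suspend path also snapshots the prepare state rather than the enable state, matching what the resume path restores. Expressed as a determine_rate, the deleted helper's "accept anything, let the SCU firmware decide" behaviour is simply (sketch, hypothetical name):

static int scu_accept_any_rate(struct clk_hw *hw,
			       struct clk_rate_request *req)
{
	/* Leave req->rate exactly as the consumer asked; the SCU
	 * firmware does the real rounding when the rate is set. */
	return 0;
}
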
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 9e11f1c7c397..41eb38552a9c 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -139,7 +139,7 @@ static struct clk * __init vf610_get_fixed_clock(
return clk;
};
-static int vf610_clk_suspend(void)
+static int vf610_clk_suspend(void *data)
{
int i;
@@ -156,7 +156,7 @@ static int vf610_clk_suspend(void)
return 0;
}
-static void vf610_clk_resume(void)
+static void vf610_clk_resume(void *data)
{
int i;
@@ -171,11 +171,15 @@ static void vf610_clk_resume(void)
writel_relaxed(ccgr[i], CCM_CCGRx(i));
}
-static struct syscore_ops vf610_clk_syscore_ops = {
+static const struct syscore_ops vf610_clk_syscore_ops = {
.suspend = vf610_clk_suspend,
.resume = vf610_clk_resume,
};
+static struct syscore vf610_clk_syscore = {
+ .ops = &vf610_clk_syscore_ops,
+};
+
static void __init vf610_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -462,7 +466,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clk[clks_init_on[i]]);
- register_syscore_ops(&vf610_clk_syscore_ops);
+ register_syscore(&vf610_clk_syscore);
/* Add the clocks to provider list */
clk_data.clks = clk;
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index e35496af5ceb..df83bd939492 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -226,4 +226,5 @@ static int __init imx_clk_disable_uart(void)
late_initcall_sync(imx_clk_disable_uart);
#endif
+MODULE_DESCRIPTION("Common clock support for NXP i.MX SoC family");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index adb7ad649a0d..aa5202f284f3 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -442,6 +442,10 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
_imx8m_clk_hw_composite(name, parent_names, reg, \
IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+#define imx8m_clk_hw_composite_bus_flags(name, parent_names, reg, flags) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT | flags)
+
#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
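
The new imx8m_clk_hw_composite_bus_flags() variant keeps the default bus-composite flags while letting a caller OR in extras. A hypothetical registration (the clock name, selector table, and register offset are illustrative, not taken from any real i.MX8M clock tree):

hws[CLK_EXAMPLE_BUS] =
	imx8m_clk_hw_composite_bus_flags("example_bus", example_bus_sels,
					 base + 0x8880,
					 CLK_SET_RATE_NO_REPARENT);
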
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 0c9c8344ad11..91e7ac0cc334 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -174,14 +174,16 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
n * od);
}
-static long
-ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *prate)
+static int ingenic_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
- return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
+ req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
@@ -317,7 +319,7 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops ingenic_pll_ops = {
.recalc_rate = ingenic_pll_recalc_rate,
- .round_rate = ingenic_pll_round_rate,
+ .determine_rate = ingenic_pll_determine_rate,
.set_rate = ingenic_pll_set_rate,
.enable = ingenic_pll_enable,
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 99da9bd86e63..0d417d69dab7 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -239,7 +239,7 @@ ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
*
* Register the clocks described by the CGU with the common clock framework.
*
- * Return: 0 on success or -errno if unsuccesful.
+ * Return: 0 on success or -errno if unsuccessful.
*/
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu);
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 590e9c85cb25..94cee44c854f 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -268,6 +268,6 @@ static void __init jz4725b_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 3e0a30574ebb..2def3aedc8dd 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -266,6 +266,6 @@ static void __init jz4740_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init);
diff --git a/drivers/clk/ingenic/jz4755-cgu.c b/drivers/clk/ingenic/jz4755-cgu.c
index f2c2d848dab7..17cf5dcaece9 100644
--- a/drivers/clk/ingenic/jz4755-cgu.c
+++ b/drivers/clk/ingenic/jz4755-cgu.c
@@ -337,7 +337,7 @@ static void __init jz4755_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index e407f00bd594..372fe4b07992 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -436,7 +436,7 @@ static void __init jz4760_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 6ae1740367f9..58f1d3bad677 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -456,7 +456,7 @@ static void __init jz4770_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index b1dadc0a5e75..1e88aef7ac0f 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -128,19 +128,19 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long jz4780_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int jz4780_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 15600000)
- return 12000000;
-
- if (req_rate < 21600000)
- return 19200000;
+ if (req->rate < 15600000)
+ req->rate = 12000000;
+ else if (req->rate < 21600000)
+ req->rate = 19200000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- if (req_rate < 36000000)
- return 24000000;
-
- return 48000000;
+ return 0;
}
static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -212,7 +212,7 @@ static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops jz4780_otg_phy_ops = {
.recalc_rate = jz4780_otg_phy_recalc_rate,
- .round_rate = jz4780_otg_phy_round_rate,
+ .determine_rate = jz4780_otg_phy_determine_rate,
.set_rate = jz4780_otg_phy_set_rate,
.enable = jz4780_otg_phy_enable,
@@ -803,6 +803,6 @@ static void __init jz4780_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init);
diff --git a/drivers/clk/ingenic/pm.c b/drivers/clk/ingenic/pm.c
index 341752b640d2..206d5cf2872f 100644
--- a/drivers/clk/ingenic/pm.c
+++ b/drivers/clk/ingenic/pm.c
@@ -15,7 +15,7 @@
static void __iomem * __maybe_unused ingenic_cgu_base;
-static int __maybe_unused ingenic_cgu_pm_suspend(void)
+static int __maybe_unused ingenic_cgu_pm_suspend(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
@@ -24,22 +24,26 @@ static int __maybe_unused ingenic_cgu_pm_suspend(void)
return 0;
}
-static void __maybe_unused ingenic_cgu_pm_resume(void)
+static void __maybe_unused ingenic_cgu_pm_resume(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
writel(val & ~LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR);
}
-static struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
+static const struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
.suspend = ingenic_cgu_pm_suspend,
.resume = ingenic_cgu_pm_resume,
};
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu)
+static struct syscore __maybe_unused ingenic_cgu_pm = {
+ .ops = &ingenic_cgu_pm_ops,
+};
+
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu)
{
if (IS_ENABLED(CONFIG_PM_SLEEP)) {
ingenic_cgu_base = cgu->base;
- register_syscore_ops(&ingenic_cgu_pm_ops);
+ register_syscore(&ingenic_cgu_pm);
}
}
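
The ingenic conversion above shows the updated syscore interface this series targets: the callbacks gain a context argument, the ops table can be const, and registration takes a wrapping struct syscore instance. The bare-bones pattern, with hypothetical names:

static int example_suspend(void *data)
{
	/* save state; data is the context pointer, unused here */
	return 0;
}

static void example_resume(void *data)
{
	/* restore state */
}

static const struct syscore_ops example_syscore_ops = {
	.suspend = example_suspend,
	.resume = example_resume,
};

static struct syscore example_syscore = {
	.ops = &example_syscore_ops,
};

/* At init time: register_syscore(&example_syscore); */
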
diff --git a/drivers/clk/ingenic/pm.h b/drivers/clk/ingenic/pm.h
index fa7540407b6b..0dcb57dc64cb 100644
--- a/drivers/clk/ingenic/pm.h
+++ b/drivers/clk/ingenic/pm.h
@@ -7,6 +7,6 @@
struct ingenic_cgu;
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu);
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu);
#endif /* DRIVERS_CLK_INGENIC_PM_H */
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 7d04ef40b7cf..bc6a51da2072 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -455,7 +455,7 @@ err_free_tcu:
return ret;
}
-static int __maybe_unused tcu_pm_suspend(void)
+static int __maybe_unused tcu_pm_suspend(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -465,7 +465,7 @@ static int __maybe_unused tcu_pm_suspend(void)
return 0;
}
-static void __maybe_unused tcu_pm_resume(void)
+static void __maybe_unused tcu_pm_resume(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -473,11 +473,15 @@ static void __maybe_unused tcu_pm_resume(void)
clk_enable(tcu->clk);
}
-static struct syscore_ops __maybe_unused tcu_pm_ops = {
+static const struct syscore_ops __maybe_unused tcu_pm_ops = {
.suspend = tcu_pm_suspend,
.resume = tcu_pm_resume,
};
+static struct syscore __maybe_unused tcu_pm = {
+ .ops = &tcu_pm_ops,
+};
+
static void __init ingenic_tcu_init(struct device_node *np)
{
int ret = ingenic_tcu_probe(np);
@@ -486,7 +490,7 @@ static void __init ingenic_tcu_init(struct device_node *np)
pr_crit("Failed to initialize TCU clocks: %d\n", ret);
if (IS_ENABLED(CONFIG_PM_SLEEP))
- register_syscore_ops(&tcu_pm_ops);
+ register_syscore(&tcu_pm);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index feb03eed4fe8..d89bdfb7c219 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -84,16 +84,17 @@ static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int x1000_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 18000000)
- return 12000000;
-
- if (req_rate < 36000000)
- return 24000000;
+ if (req->rate < 18000000)
+ req->rate = 12000000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- return 48000000;
+ return 0;
}
static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -161,7 +162,7 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops x1000_otg_phy_ops = {
.recalc_rate = x1000_otg_phy_recalc_rate,
- .round_rate = x1000_otg_phy_round_rate,
+ .determine_rate = x1000_otg_phy_determine_rate,
.set_rate = x1000_otg_phy_set_rate,
.enable = x1000_usb_phy_enable,
@@ -555,7 +556,7 @@ static void __init x1000_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
index 0fd46e50a513..acf856e5009e 100644
--- a/drivers/clk/ingenic/x1830-cgu.c
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -463,7 +463,7 @@ static void __init x1830_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 5cefc30a843e..9d5071223f4c 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -480,13 +480,10 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
num_clks++;
}
- provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
- GFP_KERNEL);
+ provider->clocks = devm_kmemdup_array(dev, clks, num_clks, sizeof(sci_clk), GFP_KERNEL);
if (!provider->clocks)
return -ENOMEM;
- memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
-
provider->num_clocks = num_clks;
devm_kfree(dev, clks);
@@ -499,8 +496,8 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
static int _cmp_sci_clk_list(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct sci_clk *ca = container_of(a, struct sci_clk, node);
- struct sci_clk *cb = container_of(b, struct sci_clk, node);
+ const struct sci_clk *ca = container_of(a, struct sci_clk, node);
+ const struct sci_clk *cb = container_of(b, struct sci_clk, node);
return _cmp_sci_clk(ca, &cb);
}
@@ -707,7 +704,7 @@ static void ti_sci_clk_remove(struct platform_device *pdev)
static struct platform_driver ti_sci_clk_driver = {
.probe = ti_sci_clk_probe,
- .remove_new = ti_sci_clk_remove,
+ .remove = ti_sci_clk_remove,
.driver = {
.name = "ti-sci-clk",
.of_match_table = of_match_ptr(ti_sci_clk_of_match),
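
The .remove_new to .remove renames throughout this series are the tail end of the platform-bus transition: .remove_new was the temporary name for the void-returning remove callback, which has since reclaimed the .remove slot. Sketch (hypothetical driver):

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* teardown; no return value to get wrong */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,	/* formerly .remove_new */
	.driver = {
		.name = "example",
	},
};
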
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index 935d9a2d8c2b..ecf180a7949c 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -105,6 +105,12 @@ static struct clk_hw
return &priv->hw;
}
+static const struct regmap_config ti_syscon_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
{
const struct ti_syscon_gate_clk_data *data, *p;
@@ -113,12 +119,17 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
int num_clks, num_parents, i;
const char *parent_name;
struct regmap *regmap;
+ void __iomem *base;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
- regmap = device_node_to_regmap(dev->of_node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"failed to get regmap\n");
diff --git a/drivers/clk/kunit_clk_assigned_rates.h b/drivers/clk/kunit_clk_assigned_rates.h
new file mode 100644
index 000000000000..df2d84dcaa93
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_CLK_ASSIGNED_RATES_H
+#define _KUNIT_CLK_ASSIGNED_RATES_H
+
+#define ASSIGNED_RATES_0_RATE 1600000
+#define ASSIGNED_RATES_1_RATE 9700000
+
+#endif
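
The header above is shared between the .dtso overlays that follow and the C side of the KUnit suite, so both agree on the expected frequencies. A hypothetical assertion built on it (the actual test code is not part of this diff):

#include <kunit/test.h>
#include <linux/clk.h>

#include "kunit_clk_assigned_rates.h"

/* Hypothetical check: after the overlay's assigned-clock-rates are
 * applied, the first clock should run at the shared constant. */
static void check_assigned_rate(struct kunit *test, struct clk *clk)
{
	KUNIT_EXPECT_EQ(test, clk_get_rate(clk),
			(unsigned long)ASSIGNED_RATES_0_RATE);
}
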
diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso
new file mode 100644
index 000000000000..e600736e70f5
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>,
+ <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso
new file mode 100644
index 000000000000..260aba458daf
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>,
+ <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_null.dtso b/drivers/clk/kunit_clk_assigned_rates_null.dtso
new file mode 100644
index 000000000000..0b27b38a9130
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_null.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso
new file mode 100644
index 000000000000..99fb332ae83d
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_one.dtso b/drivers/clk/kunit_clk_assigned_rates_one.dtso
new file mode 100644
index 000000000000..dd95ec9b1cf9
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_one.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso
new file mode 100644
index 000000000000..a41dca806318
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso
new file mode 100644
index 000000000000..389b4e2eb7f7
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>,
+ /bits/ 64 <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso
new file mode 100644
index 000000000000..3e117fd59b7d
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>,
+ /bits/ 64 <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso
new file mode 100644
index 000000000000..87041264e8f5
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso
new file mode 100644
index 000000000000..3259c003aec0
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_without.dtso b/drivers/clk/kunit_clk_assigned_rates_without.dtso
new file mode 100644
index 000000000000..22d333495cf2
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_without.dtso
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso
new file mode 100644
index 000000000000..75ac09140f83
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_zero.dtso b/drivers/clk/kunit_clk_assigned_rates_zero.dtso
new file mode 100644
index 000000000000..08e042c2eafe
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_zero.dtso
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <0>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso
new file mode 100644
index 000000000000..1d964672e855
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <0>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_fixed_rate_test.dtso b/drivers/clk/kunit_clk_fixed_rate_test.dtso
new file mode 100644
index 000000000000..d838ce766fa2
--- /dev/null
+++ b/drivers/clk/kunit_clk_fixed_rate_test.dtso
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "clk-fixed-rate_test.h"
+
+&{/} {
+ fixed_50MHz: kunit-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <TEST_FIXED_FREQUENCY>;
+ clock-accuracy = <TEST_FIXED_ACCURACY>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,single-clk-consumer";
+ clocks = <&fixed_50MHz>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
new file mode 100644
index 000000000000..760717da3235
--- /dev/null
+++ b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ kunit-clock-controller {
+ compatible = "test,clk-hw-get-dev-of-node";
+ #clock-cells = <0>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_parent_data_test.dtso b/drivers/clk/kunit_clk_parent_data_test.dtso
new file mode 100644
index 000000000000..7d3ed9a5a2e8
--- /dev/null
+++ b/drivers/clk/kunit_clk_parent_data_test.dtso
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "clk_parent_data_test.h"
+
+&{/} {
+ fixed_50: kunit-clock-50MHz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = CLK_PARENT_DATA_50MHZ_NAME;
+ };
+
+ fixed_parent: kunit-clock-1MHz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = CLK_PARENT_DATA_1MHZ_NAME;
+ };
+
+ kunit-clock-controller {
+ compatible = "test,clk-parent-data";
+ clocks = <&fixed_parent>, <&fixed_50>;
+ clock-names = CLK_PARENT_DATA_PARENT1, CLK_PARENT_DATA_PARENT2;
+ #clock-cells = <1>;
+ };
+};
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 70a005e7e1b1..0e8dd82aa84e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -124,6 +124,43 @@ config COMMON_CLK_MT2712_VENCSYS
help
This driver supports MediaTek MT2712 vencsys clocks.
+config COMMON_CLK_MT6735
+ tristate "Main clock drivers for MediaTek MT6735"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ help
+ This enables drivers for clocks and resets provided
+ by apmixedsys, topckgen, infracfg and pericfg on the
+ MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_IMGSYS
+ tristate "Clock driver for MediaTek MT6735 imgsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks provided by imgsys
+ on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_MFGCFG
+ tristate "Clock driver for MediaTek MT6735 mfgcfg"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks and resets provided
+ by mfgcfg on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_VDECSYS
+ tristate "Clock driver for MediaTek MT6735 vdecsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks and resets provided
+ by vdecsys on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_VENCSYS
+ tristate "Clock driver for MediaTek MT6735 vencsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks provided by vencsys
+ on the MediaTek MT6735 SoC.
+
config COMMON_CLK_MT6765
bool "Clock driver for MediaTek MT6765"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -887,13 +924,6 @@ config COMMON_CLK_MT8195_APUSYS
help
This driver supports MediaTek MT8195 AI Processor Unit System clocks.
-config COMMON_CLK_MT8195_AUDSYS
- tristate "Clock driver for MediaTek MT8195 audsys"
- depends on COMMON_CLK_MT8195
- default COMMON_CLK_MT8195
- help
- This driver supports MediaTek MT8195 audsys clocks.
-
config COMMON_CLK_MT8195_IMP_IIC_WRAP
tristate "Clock driver for MediaTek MT8195 imp_iic_wrap"
depends on COMMON_CLK_MT8195
@@ -908,14 +938,6 @@ config COMMON_CLK_MT8195_MFGCFG
help
This driver supports MediaTek MT8195 mfgcfg clocks.
-config COMMON_CLK_MT8195_MSDC
- tristate "Clock driver for MediaTek MT8195 msdc"
- depends on COMMON_CLK_MT8195
- default COMMON_CLK_MT8195
- help
- This driver supports MediaTek MT8195 MMC and SD Controller's
- msdc and msdc_top clocks.
-
config COMMON_CLK_MT8195_SCP_ADSP
tristate "Clock driver for MediaTek MT8195 scp_adsp"
depends on COMMON_CLK_MT8195
@@ -980,6 +1002,77 @@ config COMMON_CLK_MT8195_VENCSYS
help
This driver supports MediaTek MT8195 vencsys clocks.
+config COMMON_CLK_MT8196
+ tristate "Clock driver for MediaTek MT8196"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8196 basic clocks.
+
+config COMMON_CLK_MT8196_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8196 imp_iic_wrap"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 i2c clocks.
+
+config COMMON_CLK_MT8196_MCUSYS
+ tristate "Clock driver for MediaTek MT8196 mcusys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mcusys clocks.
+
+config COMMON_CLK_MT8196_MDPSYS
+ tristate "Clock driver for MediaTek MT8196 mdpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mdpsys clocks.
+
+config COMMON_CLK_MT8196_MFGCFG
+ tristate "Clock driver for MediaTek MT8196 mfgcfg"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mfgcfg clocks.
+
+config COMMON_CLK_MT8196_MMSYS
+ tristate "Clock driver for MediaTek MT8196 mmsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mmsys clocks.
+
+config COMMON_CLK_MT8196_PEXTPSYS
+ tristate "Clock driver for MediaTek MT8196 pextpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 pextpsys clocks.
+
+config COMMON_CLK_MT8196_UFSSYS
+ tristate "Clock driver for MediaTek MT8196 ufssys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 ufssys clocks.
+
+config COMMON_CLK_MT8196_VDECSYS
+ tristate "Clock driver for MediaTek MT8196 vdecsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vdecsys clocks.
+
+config COMMON_CLK_MT8196_VENCSYS
+ tristate "Clock driver for MediaTek MT8196 vencsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vencsys clocks.
+
config COMMON_CLK_MT8365
tristate "Clock driver for MediaTek MT8365"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index eeccfa039896..d8736a060dbd 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -2,6 +2,11 @@
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
obj-$(CONFIG_COMMON_CLK_MEDIATEK_FHCTL) += clk-fhctl.o clk-pllfh.o
+obj-$(CONFIG_COMMON_CLK_MT6735) += clk-mt6735-apmixedsys.o clk-mt6735-infracfg.o clk-mt6735-pericfg.o clk-mt6735-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT6735_IMGSYS) += clk-mt6735-imgsys.o
+obj-$(CONFIG_COMMON_CLK_MT6735_MFGCFG) += clk-mt6735-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT6735_VDECSYS) += clk-mt6735-vdecsys.o
+obj-$(CONFIG_COMMON_CLK_MT6735_VENCSYS) += clk-mt6735-vencsys.o
obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o
obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o
obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o
@@ -145,6 +150,19 @@ obj-$(CONFIG_COMMON_CLK_MT8195_VDOSYS) += clk-mt8195-vdo0.o clk-mt8195-vdo1.o
obj-$(CONFIG_COMMON_CLK_MT8195_VENCSYS) += clk-mt8195-venc.o
obj-$(CONFIG_COMMON_CLK_MT8195_VPPSYS) += clk-mt8195-vpp0.o clk-mt8195-vpp1.o
obj-$(CONFIG_COMMON_CLK_MT8195_WPESYS) += clk-mt8195-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8196) += clk-mt8196-apmixedsys.o clk-mt8196-topckgen.o \
+ clk-mt8196-topckgen2.o clk-mt8196-vlpckgen.o \
+ clk-mt8196-peri_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_IMP_IIC_WRAP) += clk-mt8196-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MCUSYS) += clk-mt8196-mcu.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MDPSYS) += clk-mt8196-mdpsys.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MFGCFG) += clk-mt8196-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MMSYS) += clk-mt8196-disp0.o clk-mt8196-disp1.o clk-mt8196-vdisp_ao.o \
+ clk-mt8196-ovl0.o clk-mt8196-ovl1.o
+obj-$(CONFIG_COMMON_CLK_MT8196_PEXTPSYS) += clk-mt8196-pextp.o
+obj-$(CONFIG_COMMON_CLK_MT8196_UFSSYS) += clk-mt8196-ufs_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VDECSYS) += clk-mt8196-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VENCSYS) += clk-mt8196-venc.o
obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365-apmixedsys.o clk-mt8365.o
obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 67d9e741c5e7..f6b1429ff757 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/dev_printk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -12,15 +13,14 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "clk-mtk.h"
#include "clk-gate.h"
struct mtk_clk_gate {
struct clk_hw hw;
struct regmap *regmap;
- int set_ofs;
- int clr_ofs;
- int sta_ofs;
- u8 bit;
+ struct regmap *regmap_hwv;
+ const struct mtk_gate *gate;
};
static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
@@ -33,9 +33,9 @@ static u32 mtk_get_clockgating(struct clk_hw *hw)
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
- regmap_read(cg->regmap, cg->sta_ofs, &val);
+ regmap_read(cg->regmap, cg->gate->regs->sta_ofs, &val);
- return val & BIT(cg->bit);
+ return val & BIT(cg->gate->shift);
}
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
@@ -52,28 +52,30 @@ static void mtk_cg_set_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->set_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->clr_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_set_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_clear_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static int mtk_cg_enable(struct clk_hw *hw)
@@ -100,6 +102,32 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
+static int mtk_cg_hwv_set_en(struct clk_hw *hw, bool enable)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 val;
+
+ regmap_write(cg->regmap_hwv,
+ enable ? cg->gate->hwv_regs->set_ofs :
+ cg->gate->hwv_regs->clr_ofs,
+ BIT(cg->gate->shift));
+
+ return regmap_read_poll_timeout_atomic(cg->regmap_hwv,
+ cg->gate->hwv_regs->sta_ofs, val,
+ val & BIT(cg->gate->shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+}
+
+static int mtk_cg_hwv_enable(struct clk_hw *hw)
+{
+ return mtk_cg_hwv_set_en(hw, true);
+}
+
+static void mtk_cg_hwv_disable(struct clk_hw *hw)
+{
+ mtk_cg_hwv_set_en(hw, false);
+}
+
static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
{
mtk_cg_clr_bit_no_setclr(hw);
@@ -124,6 +152,15 @@ static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
mtk_cg_clr_bit_no_setclr(hw);
}
+static bool mtk_cg_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_clk_gate_hwv_ops_setclr ||
+ ops == &mtk_clk_gate_hwv_ops_setclr_inv)
+ return true;
+
+ return false;
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -138,6 +175,20 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr);
+
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr_inv);
+
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
@@ -152,12 +203,10 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
- const char *parent_name,
- struct regmap *regmap, int set_ofs,
- int clr_ofs, int sta_ofs, u8 bit,
- const struct clk_ops *ops,
- unsigned long flags)
+static struct clk_hw *mtk_clk_register_gate(struct device *dev,
+ const struct mtk_gate *gate,
+ struct regmap *regmap,
+ struct regmap *regmap_hwv)
{
struct mtk_clk_gate *cg;
int ret;
@@ -167,18 +216,19 @@ static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name
if (!cg)
return ERR_PTR(-ENOMEM);
- init.name = name;
- init.flags = flags | CLK_SET_RATE_PARENT;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- init.ops = ops;
+ init.name = gate->name;
+ init.flags = gate->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = gate->parent_name ? &gate->parent_name : NULL;
+ init.num_parents = gate->parent_name ? 1 : 0;
+ init.ops = gate->ops;
+ if (mtk_cg_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
cg->regmap = regmap;
- cg->set_ofs = set_ofs;
- cg->clr_ofs = clr_ofs;
- cg->sta_ofs = sta_ofs;
- cg->bit = bit;
-
+ cg->regmap_hwv = regmap_hwv;
+ cg->gate = gate;
cg->hw.init = &init;
ret = clk_hw_register(dev, &cg->hw);
@@ -209,6 +259,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
int i;
struct clk_hw *hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
if (!clk_data)
return -ENOMEM;
@@ -219,6 +270,12 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
@@ -228,13 +285,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
- regmap,
- gate->regs->set_ofs,
- gate->regs->clr_ofs,
- gate->regs->sta_ofs,
- gate->shift, gate->ops,
- gate->flags);
+ hw = mtk_clk_register_gate(dev, gate, regmap, regmap_hwv);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 1a46b4c56fc5..4f05b9855dae 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -19,6 +19,8 @@ extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv;
struct mtk_gate_regs {
u32 sta_ofs;
@@ -31,6 +33,7 @@ struct mtk_gate {
const char *name;
const char *parent_name;
const struct mtk_gate_regs *regs;
+ const struct mtk_gate_regs *hwv_regs;
int shift;
const struct clk_ops *ops;
unsigned long flags;
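
With the hwv_regs field and the two hardware-voter ops exported above, a gate opts into voting by supplying both register banks and selecting the hwv ops; mtk_clk_register_gates() then refuses to register it unless the hardware-voter regmap is present. A hypothetical table entry (identifiers and offsets illustrative, not from a real MediaTek clock driver):

static const struct mtk_gate_regs example_cg_regs = {
	.set_ofs = 0x4,
	.clr_ofs = 0x8,
	.sta_ofs = 0x0,
};

static const struct mtk_gate_regs example_hwv_regs = {
	.set_ofs = 0x1f4,
	.clr_ofs = 0x1f8,
	.sta_ofs = 0x2f4,
};

static const struct mtk_gate example_clks[] = {
	{
		.id = CLK_EXAMPLE_BUS,	/* hypothetical binding ID */
		.name = "example_bus",
		.parent_name = "example_sel",
		.regs = &example_cg_regs,
		.hwv_regs = &example_hwv_regs,
		.shift = 0,
		.ops = &mtk_clk_gate_hwv_ops_setclr,
	},
};
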
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 27eecb6d3a53..e103121cf58e 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
/* AUDIO0 */
GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
+	GATE_DUMMY(CLK_AUD_I2S, "audio_i2s_dummy"),
+ GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
/* AUDIO1 */
GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
/* AUDIO3 */
+ GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
+ GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
@@ -158,11 +168,13 @@ static void clk_mt2701_aud_remove(struct platform_device *pdev)
static struct platform_driver clk_mt2701_aud_drv = {
.probe = clk_mt2701_aud_probe,
- .remove_new = clk_mt2701_aud_remove,
+ .remove = clk_mt2701_aud_remove,
.driver = {
.name = "clk-mt2701-aud",
.of_match_table = of_match_clk_mt2701_aud,
},
};
module_platform_driver(clk_mt2701_aud_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index b25703ec8dc0..f11c7a4fa37b 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
@@ -99,11 +100,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_bdp);
static struct platform_driver clk_mt2701_bdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-bdp",
.of_match_table = of_match_clk_mt2701_bdp,
},
};
module_platform_driver(clk_mt2701_bdp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 BDP clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index 056d1e8459da..608252e73f24 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -53,11 +53,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_eth);
static struct platform_driver clk_mt2701_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-eth",
.of_match_table = of_match_clk_mt2701_eth,
},
};
module_platform_driver(clk_mt2701_eth_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 Ethernet clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index d25687f6c9b4..b3e18b6db75d 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -50,11 +50,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_g3d);
static struct platform_driver clk_mt2701_g3d_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-g3d",
.of_match_table = of_match_clk_mt2701_g3d,
},
};
module_platform_driver(clk_mt2701_g3d_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 GPU g3d clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index cbd5ece3e9e9..000e00576052 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -50,11 +50,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_hif);
static struct platform_driver clk_mt2701_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-hif",
.of_match_table = of_match_clk_mt2701_hif,
},
};
module_platform_driver(clk_mt2701_hif_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 HIFSYS clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 2768360b213e..c158e54c4652 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
@@ -47,11 +48,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_img);
static struct platform_driver clk_mt2701_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-img",
.of_match_table = of_match_clk_mt2701_img,
},
};
module_platform_driver(clk_mt2701_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 2b990b5a0422..474d87d62e83 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
@@ -80,11 +81,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt2701_mm_id_table);
static struct platform_driver clk_mt2701_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2701-mm",
},
.id_table = clk_mt2701_mm_id_table,
};
module_platform_driver(clk_mt2701_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 MultiMedia ddp clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 57711b953b7f..5299d92f3aba 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
@@ -52,11 +53,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_vdec);
static struct platform_driver clk_mt2701_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-vdec",
.of_match_table = of_match_clk_mt2701_vdec,
},
};
module_platform_driver(clk_mt2701_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 12d9560eb4ba..1e88ad8b93f4 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -1037,4 +1037,6 @@ static int __init clk_mt2701_init(void)
}
arch_initcall(clk_mt2701_init);
+
+MODULE_DESCRIPTION("MediaTek MT2701 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
index 43272dc744c7..a60622d251ff 100644
--- a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
@@ -156,11 +156,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_apmixed);
static struct platform_driver clk_mt2712_apmixed_drv = {
.probe = clk_mt2712_apmixed_probe,
- .remove_new = clk_mt2712_apmixed_remove,
+ .remove = clk_mt2712_apmixed_remove,
.driver = {
.name = "clk-mt2712-apmixed",
.of_match_table = of_match_clk_mt2712_apmixed,
},
};
module_platform_driver(clk_mt2712_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 1b54b1f3808d..c838311a0c51 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -69,11 +69,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_bdp);
static struct platform_driver clk_mt2712_bdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-bdp",
.of_match_table = of_match_clk_mt2712_bdp,
},
};
module_platform_driver(clk_mt2712_bdp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 BDP clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 1fecc0f68f0e..bedebf86b0b5 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -47,11 +47,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_img);
static struct platform_driver clk_mt2712_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-img",
.of_match_table = of_match_clk_mt2712_img,
},
};
module_platform_driver(clk_mt2712_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 019080d6d0f0..1a73474b2f99 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_jpgdec);
static struct platform_driver clk_mt2712_jpgdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-jpgdec",
.of_match_table = of_match_clk_mt2712_jpgdec,
},
};
module_platform_driver(clk_mt2712_jpgdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 JPEG Decoder clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index 39161516cf21..c1bb45c7469e 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -42,11 +42,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_mfg);
static struct platform_driver clk_mt2712_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-mfg",
.of_match_table = of_match_clk_mt2712_mfg,
},
};
module_platform_driver(clk_mt2712_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 15cb61fe2d2f..32ecb949f7eb 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -121,11 +121,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt2712_mm_id_table);
static struct platform_driver clk_mt2712_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2712-mm",
},
.id_table = clk_mt2712_mm_id_table,
};
module_platform_driver(clk_mt2712_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 MultiMedia ddp clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index e1dd38fc2b3c..a766342fbafa 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -55,11 +55,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_vdec);
static struct platform_driver clk_mt2712_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-vdec",
.of_match_table = of_match_clk_mt2712_vdec,
},
};
module_platform_driver(clk_mt2712_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index ef6608a5db38..fc193dc8e8f6 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -44,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_venc);
static struct platform_driver clk_mt2712_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-venc",
.of_match_table = of_match_clk_mt2712_venc,
},
};
module_platform_driver(clk_mt2712_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index a8d12a1210fc..964c92130e3c 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -993,11 +993,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712);
static struct platform_driver clk_mt2712_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712",
.of_match_table = of_match_clk_mt2712,
},
};
module_platform_driver(clk_mt2712_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2712 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-apmixedsys.c b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c
new file mode 100644
index 000000000000..e0949911e8f7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-apmixedsys.h>
+
+#define AP_PLL_CON_5 0x014
+#define ARMPLL_CON0 0x200
+#define ARMPLL_CON1 0x204
+#define ARMPLL_PWR_CON0 0x20c
+#define MAINPLL_CON0 0x210
+#define MAINPLL_CON1 0x214
+#define MAINPLL_PWR_CON0 0x21c
+#define UNIVPLL_CON0 0x220
+#define UNIVPLL_CON1 0x224
+#define UNIVPLL_PWR_CON0 0x22c
+#define MMPLL_CON0 0x230
+#define MMPLL_CON1 0x234
+#define MMPLL_PWR_CON0 0x23c
+#define MSDCPLL_CON0 0x240
+#define MSDCPLL_CON1 0x244
+#define MSDCPLL_PWR_CON0 0x24c
+#define VENCPLL_CON0 0x250
+#define VENCPLL_CON1 0x254
+#define VENCPLL_PWR_CON0 0x25c
+#define TVDPLL_CON0 0x260
+#define TVDPLL_CON1 0x264
+#define TVDPLL_PWR_CON0 0x26c
+#define APLL1_CON0 0x270
+#define APLL1_CON1 0x274
+#define APLL1_CON2 0x278
+#define APLL1_PWR_CON0 0x280
+#define APLL2_CON0 0x284
+#define APLL2_CON1 0x288
+#define APLL2_CON2 0x28c
+#define APLL2_PWR_CON0 0x294
+
+#define CON0_RST_BAR BIT(24)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcwbits, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = "clk26m", \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_chg_reg = _pcw_reg, \
+ .pcwbits = _pcwbits, \
+ .flags = _flags, \
+ }
+
+static const struct mtk_pll_data apmixedsys_plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", ARMPLL_CON0, ARMPLL_PWR_CON0, 0x00000001, 0, ARMPLL_CON1, 24, 0, 0, 0, ARMPLL_CON1, 21, PLL_AO),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, MAINPLL_PWR_CON0, 0xf0000101, CON0_RST_BAR, MAINPLL_CON1, 24, 0, 0, 0, MAINPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, UNIVPLL_PWR_CON0, 0xfc000001, CON0_RST_BAR, UNIVPLL_CON1, 24, 0, 0, 0, UNIVPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", MMPLL_CON0, MMPLL_PWR_CON0, 0x00000001, 0, MMPLL_CON1, 24, 0, 0, 0, MMPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, MSDCPLL_PWR_CON0, 0x00000001, 0, MSDCPLL_CON1, 24, 0, 0, 0, MSDCPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", VENCPLL_CON0, VENCPLL_PWR_CON0, 0x00000001, CON0_RST_BAR, VENCPLL_CON1, 24, 0, 0, 0, VENCPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", TVDPLL_CON0, TVDPLL_PWR_CON0, 0x00000001, 0, TVDPLL_CON1, 24, 0, 0, 0, TVDPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", APLL1_CON0, APLL1_PWR_CON0, 0x00000001, 0, APLL1_CON0, 4, APLL1_CON2, AP_PLL_CON_5, 0, APLL1_CON1, 31, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", APLL2_CON0, APLL2_PWR_CON0, 0x00000001, 0, APLL2_CON0, 4, APLL2_CON2, AP_PLL_CON_5, 1, APLL2_CON1, 31, 0)
+};
+
+static int clk_mt6735_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(&pdev->dev, ARRAY_SIZE(apmixedsys_plls));
+ if (!clk_data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_data);
+
+ ret = mtk_clk_register_plls(pdev->dev.of_node, apmixedsys_plls,
+ ARRAY_SIZE(apmixedsys_plls), clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register PLLs: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ dev_err(&pdev->dev,
+ "Failed to register clock provider: %d\n", ret);
+
+ return ret;
+}
+
+static void clk_mt6735_apmixed_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ mtk_clk_unregister_plls(apmixedsys_plls, ARRAY_SIZE(apmixedsys_plls), clk_data);
+}
+
+static const struct of_device_id of_match_mt6735_apmixedsys[] = {
+ { .compatible = "mediatek,mt6735-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_apmixedsys);
+
+static struct platform_driver clk_mt6735_apmixedsys = {
+ .probe = clk_mt6735_apmixed_probe,
+ .remove = clk_mt6735_apmixed_remove,
+ .driver = {
+ .name = "clk-mt6735-apmixedsys",
+ .of_match_table = of_match_mt6735_apmixedsys,
+ },
+};
+module_platform_driver(clk_mt6735_apmixedsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 apmixedsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-imgsys.c b/drivers/clk/mediatek/clk-mt6735-imgsys.c
new file mode 100644
index 000000000000..c564f8f72432
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-imgsys.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-imgsys.h>
+
+#define IMG_CG_CON 0x00
+#define IMG_CG_SET 0x04
+#define IMG_CG_CLR 0x08
+
+static struct mtk_gate_regs imgsys_cg_regs = {
+ .set_ofs = IMG_CG_SET,
+ .clr_ofs = IMG_CG_CLR,
+ .sta_ofs = IMG_CG_CON,
+};
+
+static const struct mtk_gate imgsys_gates[] = {
+ GATE_MTK(CLK_IMG_SMI_LARB2, "smi_larb2", "mm_sel", &imgsys_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_SMI, "cam_smi", "mm_sel", &imgsys_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_CAM, "cam_cam", "mm_sel", &imgsys_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SEN_TG, "sen_tg", "mm_sel", &imgsys_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SEN_CAM, "sen_cam", "mm_sel", &imgsys_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_SV, "cam_sv", "mm_sel", &imgsys_cg_regs, 9, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SUFOD, "sufod", "mm_sel", &imgsys_cg_regs, 10, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_FD, "fd", "mm_sel", &imgsys_cg_regs, 11, &mtk_clk_gate_ops_setclr),
+};
+
+static const struct mtk_clk_desc imgsys_clks = {
+ .clks = imgsys_gates,
+ .num_clks = ARRAY_SIZE(imgsys_gates),
+};
+
+static const struct of_device_id of_match_mt6735_imgsys[] = {
+ { .compatible = "mediatek,mt6735-imgsys", .data = &imgsys_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_imgsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-imgsys",
+ .of_match_table = of_match_mt6735_imgsys,
+ },
+};
+module_platform_driver(clk_mt6735_imgsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 imgsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-infracfg.c b/drivers/clk/mediatek/clk-mt6735-infracfg.c
new file mode 100644
index 000000000000..c1171f903cfa
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-infracfg.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-infracfg.h>
+#include <dt-bindings/reset/mediatek,mt6735-infracfg.h>
+
+#define INFRA_RST0 0x30
+#define INFRA_GLOBALCON_PDN0 0x40
+#define INFRA_PDN1 0x44
+#define INFRA_PDN_STA 0x48
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = INFRA_GLOBALCON_PDN0,
+ .clr_ofs = INFRA_PDN1,
+ .sta_ofs = INFRA_PDN_STA,
+};
+
+static const struct mtk_gate infracfg_gates[] = {
+ GATE_MTK(CLK_INFRA_DBG, "dbg", "axi_sel", &infra_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_GCE, "gce", "axi_sel", &infra_cg_regs, 1, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_TRBG, "trbg", "axi_sel", &infra_cg_regs, 2, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CPUM, "cpum", "axi_sel", &infra_cg_regs, 3, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_DEVAPC, "devapc", "axi_sel", &infra_cg_regs, 4, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_AUDIO, "audio", "aud_intbus_sel", &infra_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_GCPU, "gcpu", "axi_sel", &infra_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_L2C_SRAM, "l2csram", "axi_sel", &infra_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_M4U, "m4u", "axi_sel", &infra_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CLDMA, "cldma", "axi_sel", &infra_cg_regs, 12, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CONNMCU_BUS, "connmcu_bus", "axi_sel", &infra_cg_regs, 15, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_KP, "kp", "axi_sel", &infra_cg_regs, 16, &mtk_clk_gate_ops_setclr),
+ GATE_MTK_FLAGS(CLK_INFRA_APXGPT, "apxgpt", "axi_sel", &infra_cg_regs, 18, &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL),
+ GATE_MTK(CLK_INFRA_SEJ, "sej", "axi_sel", &infra_cg_regs, 19, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CCIF0_AP, "ccif0ap", "axi_sel", &infra_cg_regs, 20, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CCIF1_AP, "ccif1ap", "axi_sel", &infra_cg_regs, 21, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_PMIC_SPI, "pmicspi", "pmicspi_sel", &infra_cg_regs, 22, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_PMIC_WRAP, "pmicwrap", "axi_sel", &infra_cg_regs, 23, &mtk_clk_gate_ops_setclr)
+};
+
+static u16 infracfg_rst_bank_ofs[] = { INFRA_RST0 };
+
+static u16 infracfg_rst_idx_map[] = {
+ [MT6735_INFRA_RST0_EMI_REG] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_INFRA_RST0_DRAMC0_AO] = 0 * RST_NR_PER_BANK + 1,
+ [MT6735_INFRA_RST0_AP_CIRQ_EINT] = 0 * RST_NR_PER_BANK + 3,
+ [MT6735_INFRA_RST0_APXGPT] = 0 * RST_NR_PER_BANK + 4,
+ [MT6735_INFRA_RST0_SCPSYS] = 0 * RST_NR_PER_BANK + 5,
+ [MT6735_INFRA_RST0_KP] = 0 * RST_NR_PER_BANK + 6,
+ [MT6735_INFRA_RST0_PMIC_WRAP] = 0 * RST_NR_PER_BANK + 7,
+ [MT6735_INFRA_RST0_CLDMA_AO_TOP] = 0 * RST_NR_PER_BANK + 8,
+ [MT6735_INFRA_RST0_USBSIF_TOP] = 0 * RST_NR_PER_BANK + 9,
+ [MT6735_INFRA_RST0_EMI] = 0 * RST_NR_PER_BANK + 16,
+ [MT6735_INFRA_RST0_CCIF] = 0 * RST_NR_PER_BANK + 17,
+ [MT6735_INFRA_RST0_DRAMC0] = 0 * RST_NR_PER_BANK + 18,
+ [MT6735_INFRA_RST0_EMI_AO_REG] = 0 * RST_NR_PER_BANK + 19,
+ [MT6735_INFRA_RST0_CCIF_AO] = 0 * RST_NR_PER_BANK + 20,
+ [MT6735_INFRA_RST0_TRNG] = 0 * RST_NR_PER_BANK + 21,
+ [MT6735_INFRA_RST0_SYS_CIRQ] = 0 * RST_NR_PER_BANK + 22,
+ [MT6735_INFRA_RST0_GCE] = 0 * RST_NR_PER_BANK + 23,
+ [MT6735_INFRA_RST0_M4U] = 0 * RST_NR_PER_BANK + 24,
+ [MT6735_INFRA_RST0_CCIF1] = 0 * RST_NR_PER_BANK + 25,
+ [MT6735_INFRA_RST0_CLDMA_TOP_PD] = 0 * RST_NR_PER_BANK + 26
+};
+
+static const struct mtk_clk_rst_desc infracfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infracfg_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infracfg_rst_bank_ofs),
+ .rst_idx_map = infracfg_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infracfg_rst_idx_map)
+};
+
+static const struct mtk_clk_desc infracfg_clks = {
+ .clks = infracfg_gates,
+ .num_clks = ARRAY_SIZE(infracfg_gates),
+
+ .rst_desc = &infracfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_infracfg[] = {
+ { .compatible = "mediatek,mt6735-infracfg", .data = &infracfg_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_infracfg);
+
+static struct platform_driver clk_mt6735_infracfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-infracfg",
+ .of_match_table = of_match_mt6735_infracfg,
+ },
+};
+module_platform_driver(clk_mt6735_infracfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 infracfg clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-mfgcfg.c b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c
new file mode 100644
index 000000000000..1f5aedddf209
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-mfgcfg.h>
+
+#define MFG_CG_CON 0x00
+#define MFG_CG_SET 0x04
+#define MFG_CG_CLR 0x08
+#define MFG_RESET 0x0c
+
+static struct mtk_gate_regs mfgcfg_cg_regs = {
+ .set_ofs = MFG_CG_SET,
+ .clr_ofs = MFG_CG_CLR,
+ .sta_ofs = MFG_CG_CON,
+};
+
+static const struct mtk_gate mfgcfg_gates[] = {
+ GATE_MTK(CLK_MFG_BG3D, "bg3d", "mfg_sel", &mfgcfg_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+};
+
+static u16 mfgcfg_rst_ofs[] = { MFG_RESET };
+
+static const struct mtk_clk_rst_desc mfgcfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = mfgcfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(mfgcfg_rst_ofs)
+};
+
+static const struct mtk_clk_desc mfgcfg_clks = {
+ .clks = mfgcfg_gates,
+ .num_clks = ARRAY_SIZE(mfgcfg_gates),
+
+ .rst_desc = &mfgcfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_mfgcfg[] = {
+ { .compatible = "mediatek,mt6735-mfgcfg", .data = &mfgcfg_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_mfgcfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-mfgcfg",
+ .of_match_table = of_match_mt6735_mfgcfg,
+ },
+};
+module_platform_driver(clk_mt6735_mfgcfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Mediatek MT6735 mfgcfg clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-pericfg.c b/drivers/clk/mediatek/clk-mt6735-pericfg.c
new file mode 100644
index 000000000000..cbdf6d25c1b2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-pericfg.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-pericfg.h>
+#include <dt-bindings/reset/mediatek,mt6735-pericfg.h>
+
+#define PERI_GLOBALCON_RST0 0x00
+#define PERI_GLOBALCON_RST1 0x04
+#define PERI_GLOBALCON_PDN0_SET 0x08
+#define PERI_GLOBALCON_PDN0_CLR 0x10
+#define PERI_GLOBALCON_PDN0_STA 0x18
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = PERI_GLOBALCON_PDN0_SET,
+ .clr_ofs = PERI_GLOBALCON_PDN0_CLR,
+ .sta_ofs = PERI_GLOBALCON_PDN0_STA,
+};
+
+static const struct mtk_gate pericfg_gates[] = {
+ GATE_MTK(CLK_PERI_DISP_PWM, "disp_pwm", "disppwm_sel", &peri_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_THERM, "therm", "axi_sel", &peri_cg_regs, 1, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM1, "pwm1", "axi_sel", &peri_cg_regs, 2, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM2, "pwm2", "axi_sel", &peri_cg_regs, 3, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM3, "pwm3", "axi_sel", &peri_cg_regs, 4, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM4, "pwm4", "axi_sel", &peri_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM5, "pwm5", "axi_sel", &peri_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM6, "pwm6", "axi_sel", &peri_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM7, "pwm7", "axi_sel", &peri_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM, "pwm", "axi_sel", &peri_cg_regs, 9, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_USB0, "usb0", "usb20_sel", &peri_cg_regs, 10, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_IRDA, "irda", "irda_sel", &peri_cg_regs, 11, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_APDMA, "apdma", "axi_sel", &peri_cg_regs, 12, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_0, "msdc30_0", "msdc30_0_sel", &peri_cg_regs, 13, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_1, "msdc30_1", "msdc30_1_sel", &peri_cg_regs, 14, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_2, "msdc30_2", "msdc30_2_sel", &peri_cg_regs, 15, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_3, "msdc30_3", "msdc30_3_sel", &peri_cg_regs, 16, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART0, "uart0", "uart_sel", &peri_cg_regs, 17, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART1, "uart1", "uart_sel", &peri_cg_regs, 18, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART2, "uart2", "uart_sel", &peri_cg_regs, 19, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART3, "uart3", "uart_sel", &peri_cg_regs, 20, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART4, "uart4", "uart_sel", &peri_cg_regs, 21, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_BTIF, "btif", "axi_sel", &peri_cg_regs, 22, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C0, "i2c0", "axi_sel", &peri_cg_regs, 23, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C1, "i2c1", "axi_sel", &peri_cg_regs, 24, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C2, "i2c2", "axi_sel", &peri_cg_regs, 25, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C3, "i2c3", "axi_sel", &peri_cg_regs, 26, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_AUXADC, "auxadc", "axi_sel", &peri_cg_regs, 27, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_SPI0, "spi0", "spi_sel", &peri_cg_regs, 28, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_IRTX, "irtx", "irtx_sel", &peri_cg_regs, 29, &mtk_clk_gate_ops_setclr)
+};
+
+static u16 pericfg_rst_bank_ofs[] = { PERI_GLOBALCON_RST0, PERI_GLOBALCON_RST1 };
+
+static u16 pericfg_rst_idx_map[] = {
+ [MT6735_PERI_RST0_UART0] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_PERI_RST0_UART1] = 0 * RST_NR_PER_BANK + 1,
+ [MT6735_PERI_RST0_UART2] = 0 * RST_NR_PER_BANK + 2,
+ [MT6735_PERI_RST0_UART3] = 0 * RST_NR_PER_BANK + 3,
+ [MT6735_PERI_RST0_UART4] = 0 * RST_NR_PER_BANK + 4,
+ [MT6735_PERI_RST0_BTIF] = 0 * RST_NR_PER_BANK + 6,
+ [MT6735_PERI_RST0_DISP_PWM_PERI] = 0 * RST_NR_PER_BANK + 7,
+ [MT6735_PERI_RST0_PWM] = 0 * RST_NR_PER_BANK + 8,
+ [MT6735_PERI_RST0_AUXADC] = 0 * RST_NR_PER_BANK + 10,
+ [MT6735_PERI_RST0_DMA] = 0 * RST_NR_PER_BANK + 11,
+ [MT6735_PERI_RST0_IRDA] = 0 * RST_NR_PER_BANK + 12,
+ [MT6735_PERI_RST0_IRTX] = 0 * RST_NR_PER_BANK + 13,
+ [MT6735_PERI_RST0_THERM] = 0 * RST_NR_PER_BANK + 16,
+ [MT6735_PERI_RST0_MSDC2] = 0 * RST_NR_PER_BANK + 17,
+ [MT6735_PERI_RST0_MSDC3] = 0 * RST_NR_PER_BANK + 18,
+ [MT6735_PERI_RST0_MSDC0] = 0 * RST_NR_PER_BANK + 19,
+ [MT6735_PERI_RST0_MSDC1] = 0 * RST_NR_PER_BANK + 20,
+ [MT6735_PERI_RST0_I2C0] = 0 * RST_NR_PER_BANK + 22,
+ [MT6735_PERI_RST0_I2C1] = 0 * RST_NR_PER_BANK + 23,
+ [MT6735_PERI_RST0_I2C2] = 0 * RST_NR_PER_BANK + 24,
+ [MT6735_PERI_RST0_I2C3] = 0 * RST_NR_PER_BANK + 25,
+ [MT6735_PERI_RST0_USB] = 0 * RST_NR_PER_BANK + 28,
+
+ [MT6735_PERI_RST1_SPI0] = 1 * RST_NR_PER_BANK + 1,
+};
+
+static const struct mtk_clk_rst_desc pericfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_bank_ofs),
+ .rst_idx_map = pericfg_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pericfg_rst_idx_map)
+};
+
+static const struct mtk_clk_desc pericfg_clks = {
+ .clks = pericfg_gates,
+ .num_clks = ARRAY_SIZE(pericfg_gates),
+
+ .rst_desc = &pericfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_pericfg[] = {
+ { .compatible = "mediatek,mt6735-pericfg", .data = &pericfg_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_pericfg);
+
+static struct platform_driver clk_mt6735_pericfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-pericfg",
+ .of_match_table = of_match_mt6735_pericfg,
+ },
+};
+module_platform_driver(clk_mt6735_pericfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 pericfg clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-topckgen.c b/drivers/clk/mediatek/clk-mt6735-topckgen.c
new file mode 100644
index 000000000000..2589ebfe2271
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-topckgen.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-topckgen.h>
+
+#define CLK_CFG_0 0x40
+#define CLK_CFG_0_SET 0x44
+#define CLK_CFG_0_CLR 0x48
+#define CLK_CFG_1 0x50
+#define CLK_CFG_1_SET 0x54
+#define CLK_CFG_1_CLR 0x58
+#define CLK_CFG_2 0x60
+#define CLK_CFG_2_SET 0x64
+#define CLK_CFG_2_CLR 0x68
+#define CLK_CFG_3 0x70
+#define CLK_CFG_3_SET 0x74
+#define CLK_CFG_3_CLR 0x78
+#define CLK_CFG_4 0x80
+#define CLK_CFG_4_SET 0x84
+#define CLK_CFG_4_CLR 0x88
+#define CLK_CFG_5 0x90
+#define CLK_CFG_5_SET 0x94
+#define CLK_CFG_5_CLR 0x98
+#define CLK_CFG_6 0xa0
+#define CLK_CFG_6_SET 0xa4
+#define CLK_CFG_6_CLR 0xa8
+#define CLK_CFG_7 0xb0
+#define CLK_CFG_7_SET 0xb4
+#define CLK_CFG_7_CLR 0xb8
+
+static DEFINE_SPINLOCK(mt6735_topckgen_lock);
+
+/* Some clocks with unknown details are modeled as fixed clocks */
+static const struct mtk_fixed_clk topckgen_fixed_clks[] = {
+ /*
+ * This clock is a parent option for multiple muxes and at first
+ * looks like an alternative name for clk26m, but it appears
+ * alongside clk26m in several muxes, which suggests it is a
+ * separate clock.
+ */
+ FIXED_CLK(CLK_TOP_AD_SYS_26M_CK, "ad_sys_26m_ck", "clk26m", 26 * MHZ),
+ /*
+ * This clock is the parent of DMPLL divisors. It might be MEMPLL
+ * or its parent, as DMPLL appears to be an alternative name for
+ * MEMPLL.
+ */
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", NULL, 0),
+ /*
+ * DMPLL clock (dmpll_ck), controlled by DDRPHY.
+ */
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll", "clkph_mck_o", 0),
+ /*
+ * MIPI DPI clock. Parent option for dpi0_sel. Unknown parent.
+ */
+ FIXED_CLK(CLK_TOP_DPI_CK, "dpi_ck", NULL, 0),
+ /*
+ * This clock is a child of WHPLL which is controlled by
+ * the modem.
+ */
+ FIXED_CLK(CLK_TOP_WHPLL_AUDIO_CK, "whpll_audio_ck", NULL, 0)
+};
+
+static const struct mtk_fixed_factor topckgen_factors[] = {
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16),
+ FACTOR(CLK_TOP_VENCPLL_D3, "vencpll_d3", "vencpll", 1, 3),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+ FACTOR(CLK_TOP_AD_SYS_26M_D2, "ad_sys_26m_d2", "clk26m", 1, 2)
+};
+
+static const char * const axi_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll",
+ "dmpll_d2"
+};
+
+static const char * const mem_sel_parents[] = {
+ "clk26m",
+ "dmpll"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const mm_sel_parents[] = {
+ "clk26m",
+ "vencpll",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll"
+};
+
+static const char * const pwm_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const vdec_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "syspll_d2",
+ "syspll2_d2",
+ "msdcpll_d2"
+};
+
+static const char * const mfg_sel_parents[] = {
+ "clk26m",
+ "mmpll",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2"
+};
+
+static const char * const camtg_sel_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "msdcpll_d4"
+};
+
+static const char * const uart_sel_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_sel_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "msdcpll_d8",
+ "syspll2_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const usb20_sel_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const msdc50_0_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_0_sel_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "msdcpll_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d3",
+ "univpll_d26",
+ "syspll2_d4",
+ "univpll_d2"
+};
+
+static const char * const msdc30_1_2_sel_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d26",
+ "syspll2_d4"
+};
+
+static const char * const msdc30_3_sel_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d26",
+ "msdcpll_d16",
+ "syspll2_d4"
+};
+
+static const char * const audio_sel_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_sel_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "dmpll_d4"
+};
+
+static const char * const pmicspi_sel_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d4",
+ "dmpll_d8"
+};
+
+static const char * const scp_sel_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const atb_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "dmpll"
+};
+
+static const char * const dpi0_sel_parents[] = {
+ "clk26m",
+ "tvdpll",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "dpi_ck"
+};
+
+static const char * const scam_sel_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "vencpll_d3"
+};
+
+static const char * const mfg13m_sel_parents[] = {
+ "clk26m",
+ "ad_sys_26m_d2"
+};
+
+static const char * const aud_1_2_sel_parents[] = {
+ "clk26m",
+ "apll1"
+};
+
+static const char * const irda_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4"
+};
+
+static const char * const irtx_sel_parents[] = {
+ "clk26m",
+ "ad_sys_26m_ck"
+};
+
+static const char * const disppwm_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll4_d2_d8",
+ "ad_sys_26m_ck"
+};
+
+static const struct mtk_mux topckgen_muxes[] = {
+ MUX_CLR_SET_UPD(CLK_TOP_AXI_SEL, "axi_sel", axi_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 0, 3, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SEL, "mem_sel", mem_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 8, 1, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_DDRPHY_SEL, "ddrphycfg_sel", ddrphycfg_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 1, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 16, 4, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 0, 1, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB20_SEL, "usb20_sel", usb20_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 2, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 0, 4, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 16, 3, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 24, 4, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDINTBUS_SEL, "aud_intbus_sel", aud_intbus_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 8, 2, 15, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 16, 3, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 24, 2, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCAM_SEL, "scam_sel", scam_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 16, 2, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG13M_SEL, "mfg13m_sel", mfg13m_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD1_SEL, "aud_1_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 1, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD2_SEL, "aud_2_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 1, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IRDA_SEL, "irda_sel", irda_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 1, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IRTX_SEL, "irtx_sel", irtx_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 24, 1, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISPPWM_SEL, "disppwm_sel", disppwm_sel_parents, CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 0, 2, 7, 0, 0),
+};
+
+static const struct mtk_clk_desc topckgen_desc = {
+ .fixed_clks = topckgen_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(topckgen_fixed_clks),
+ .factor_clks = topckgen_factors,
+ .num_factor_clks = ARRAY_SIZE(topckgen_factors),
+ .mux_clks = topckgen_muxes,
+ .num_mux_clks = ARRAY_SIZE(topckgen_muxes),
+ .clk_lock = &mt6735_topckgen_lock,
+};
+
+static const struct of_device_id of_match_mt6735_topckgen[] = {
+ { .compatible = "mediatek,mt6735-topckgen", .data = &topckgen_desc},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_topckgen);
+
+static struct platform_driver clk_mt6735_topckgen = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-topckgen",
+ .of_match_table = of_match_mt6735_topckgen,
+ },
+};
+module_platform_driver(clk_mt6735_topckgen);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 topckgen clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-vdecsys.c b/drivers/clk/mediatek/clk-mt6735-vdecsys.c
new file mode 100644
index 000000000000..8817085fc1db
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-vdecsys.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-vdecsys.h>
+#include <dt-bindings/reset/mediatek,mt6735-vdecsys.h>
+
+#define VDEC_CKEN_SET 0x00
+#define VDEC_CKEN_CLR 0x04
+#define SMI_LARB1_CKEN_SET 0x08
+#define SMI_LARB1_CKEN_CLR 0x0c
+#define VDEC_RESETB_CON 0x10
+#define SMI_LARB1_RESETB_CON 0x14
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs vdec_cg_regs = {
+ .set_ofs = VDEC_CKEN_SET,
+ .clr_ofs = VDEC_CKEN_CLR,
+ .sta_ofs = VDEC_CKEN_SET,
+};
+
+static struct mtk_gate_regs smi_larb1_cg_regs = {
+ .set_ofs = SMI_LARB1_CKEN_SET,
+ .clr_ofs = SMI_LARB1_CKEN_CLR,
+ .sta_ofs = SMI_LARB1_CKEN_SET,
+};
+
+static const struct mtk_gate vdecsys_gates[] = {
+ GATE_MTK(CLK_VDEC_VDEC, "vdec", "vdec_sel", &vdec_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VDEC_SMI_LARB1, "smi_larb1", "vdec_sel", &smi_larb1_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+};
+
+static u16 vdecsys_rst_bank_ofs[] = { VDEC_RESETB_CON, SMI_LARB1_RESETB_CON };
+
+static u16 vdecsys_rst_idx_map[] = {
+ [MT6735_VDEC_RST0_VDEC] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_VDEC_RST1_SMI_LARB1] = 1 * RST_NR_PER_BANK + 0,
+};
+
+static const struct mtk_clk_rst_desc vdecsys_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = vdecsys_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(vdecsys_rst_bank_ofs),
+ .rst_idx_map = vdecsys_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(vdecsys_rst_idx_map)
+};
+
+static const struct mtk_clk_desc vdecsys_clks = {
+ .clks = vdecsys_gates,
+ .num_clks = ARRAY_SIZE(vdecsys_gates),
+ .rst_desc = &vdecsys_resets
+};
+
+static const struct of_device_id of_match_mt6735_vdecsys[] = {
+ { .compatible = "mediatek,mt6735-vdecsys", .data = &vdecsys_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_vdecsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-vdecsys",
+ .of_match_table = of_match_mt6735_vdecsys,
+ },
+};
+module_platform_driver(clk_mt6735_vdecsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 vdecsys clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-vencsys.c b/drivers/clk/mediatek/clk-mt6735-vencsys.c
new file mode 100644
index 000000000000..8dec7f98492a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-vencsys.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-vencsys.h>
+
+#define VENC_CG_CON 0x00
+#define VENC_CG_SET 0x04
+#define VENC_CG_CLR 0x08
+
+static struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = VENC_CG_SET,
+ .clr_ofs = VENC_CG_CLR,
+ .sta_ofs = VENC_CG_CON,
+};
+
+static const struct mtk_gate vencsys_gates[] = {
+ GATE_MTK(CLK_VENC_SMI_LARB3, "smi_larb3", "mm_sel", &venc_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_VENC, "venc", "mm_sel", &venc_cg_regs, 4, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_JPGENC, "jpgenc", "mm_sel", &venc_cg_regs, 8, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_JPGDEC, "jpgdec", "mm_sel", &venc_cg_regs, 12, &mtk_clk_gate_ops_setclr_inv),
+};
+
+static const struct mtk_clk_desc vencsys_clks = {
+ .clks = vencsys_gates,
+ .num_clks = ARRAY_SIZE(vencsys_gates),
+};
+
+static const struct of_device_id of_match_mt6735_vencsys[] = {
+ { .compatible = "mediatek,mt6735-vencsys", .data = &vencsys_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_vencsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-vencsys",
+ .of_match_table = of_match_mt6735_vencsys,
+ },
+};
+module_platform_driver(clk_mt6735_vencsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Mediatek MT6735 vencsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 901bf793c272..2be1458087e6 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -69,11 +69,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_audio);
static struct platform_driver clk_mt6765_audio_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-audio",
.of_match_table = of_match_clk_mt6765_audio,
},
};
module_platform_driver(clk_mt6765_audio_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6765 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index 19cedfa832bc..2a7f30dc85bb 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -50,11 +50,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_cam);
static struct platform_driver clk_mt6765_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-cam",
.of_match_table = of_match_clk_mt6765_cam,
},
};
module_platform_driver(clk_mt6765_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6765 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index 16e20c61932e..ff857852cfb0 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -46,11 +46,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_img);
static struct platform_driver clk_mt6765_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-img",
.of_match_table = of_match_clk_mt6765_img,
},
};
module_platform_driver(clk_mt6765_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6765 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index cc5bb0c95f08..8261dfd12a9a 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mipi0a);
static struct platform_driver clk_mt6765_mipi0a_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mipi0a",
.of_match_table = of_match_clk_mt6765_mipi0a,
},
};
module_platform_driver(clk_mt6765_mipi0a_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6765 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index fc5842e13b78..e525919f9e81 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -72,11 +72,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mm);
static struct platform_driver clk_mt6765_mm_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mm",
.of_match_table = of_match_clk_mt6765_mm,
},
};
module_platform_driver(clk_mt6765_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6765 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index d6e036795b0a..f309d1090cda 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -45,11 +45,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_vcodec);
static struct platform_driver clk_mt6765_vcodec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-vcodec",
.of_match_table = of_match_clk_mt6765_vcodec,
},
};
module_platform_driver(clk_mt6765_vcodec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6765 Video Codec clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
index 9c7f7407d798..d53731e7933f 100644
--- a/drivers/clk/mediatek/clk-mt6765.c
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -873,4 +873,6 @@ static int __init clk_mt6765_init(void)
}
arch_initcall(clk_mt6765_init);
+
+MODULE_DESCRIPTION("MediaTek MT6765 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 66ae6421e27e..8ed318bd7765 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -104,12 +104,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_aud);
static struct platform_driver clk_mt6779_aud_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-aud",
.of_match_table = of_match_clk_mt6779_aud,
},
};
-
module_platform_driver(clk_mt6779_aud_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 7b1a40d891ad..f397b55606de 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -55,12 +55,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_cam);
static struct platform_driver clk_mt6779_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-cam",
.of_match_table = of_match_clk_mt6779_cam,
},
};
-
module_platform_driver(clk_mt6779_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 1c53209f60a9..474a59a4ca9e 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_img);
static struct platform_driver clk_mt6779_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-img",
.of_match_table = of_match_clk_mt6779_img,
@@ -55,4 +55,6 @@ static struct platform_driver clk_mt6779_img_drv = {
};
module_platform_driver(clk_mt6779_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index 784bc08ace5e..c2314654f43a 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_ipe);
static struct platform_driver clk_mt6779_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-ipe",
.of_match_table = of_match_clk_mt6779_ipe,
@@ -57,4 +57,6 @@ static struct platform_driver clk_mt6779_ipe_drv = {
};
module_platform_driver(clk_mt6779_ipe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 Image Processing Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index 040e4c45fa5f..21793cb6e6e3 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_mfg);
static struct platform_driver clk_mt6779_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-mfg",
.of_match_table = of_match_clk_mt6779_mfg,
@@ -52,4 +52,6 @@ static struct platform_driver clk_mt6779_mfg_drv = {
};
module_platform_driver(clk_mt6779_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index 5e17e441f679..30bbab308388 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6779_mm_id_table);
static struct platform_driver clk_mt6779_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6779-mm",
},
@@ -106,4 +106,6 @@ static struct platform_driver clk_mt6779_mm_drv = {
};
module_platform_driver(clk_mt6779_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 MultiMedia mdp/ddp clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index a411c23512b7..458d012f023c 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_vdec);
static struct platform_driver clk_mt6779_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-vdec",
.of_match_table = of_match_clk_mt6779_vdec,
@@ -64,4 +64,6 @@ static struct platform_driver clk_mt6779_vdec_drv = {
};
module_platform_driver(clk_mt6779_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index f14512d284d6..70cebc274031 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_venc);
static struct platform_driver clk_mt6779_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-venc",
.of_match_table = of_match_clk_mt6779_venc,
@@ -55,4 +55,6 @@ static struct platform_driver clk_mt6779_venc_drv = {
};
module_platform_driver(clk_mt6779_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6779 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index ffedb1fe3c67..86732f5acf93 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1305,7 +1305,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779);
static struct platform_driver clk_mt6779_infra_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-infra",
.of_match_table = of_match_clk_mt6779_infra,
@@ -1330,4 +1330,6 @@ static int __init clk_mt6779_init(void)
}
arch_initcall(clk_mt6779_init);
+
+MODULE_DESCRIPTION("MediaTek MT6779 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
index 8c65974ed9b8..91665d7f125e 100644
--- a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
@@ -201,7 +201,7 @@ static void clk_mt6795_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt6795_apmixed_drv = {
.probe = clk_mt6795_apmixed_probe,
- .remove_new = clk_mt6795_apmixed_remove,
+ .remove = clk_mt6795_apmixed_remove,
.driver = {
.name = "clk-mt6795-apmixed",
.of_match_table = of_match_clk_mt6795_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
index 06d7fdf3098b..e4559569f5b0 100644
--- a/drivers/clk/mediatek/clk-mt6795-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -144,7 +144,7 @@ static struct platform_driver clk_mt6795_infracfg_drv = {
.of_match_table = of_match_clk_mt6795_infracfg,
},
.probe = clk_mt6795_infracfg_probe,
- .remove_new = clk_mt6795_infracfg_remove,
+ .remove = clk_mt6795_infracfg_remove,
};
module_platform_driver(clk_mt6795_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c
index dff6a6ded837..1d658bb19e82 100644
--- a/drivers/clk/mediatek/clk-mt6795-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-mfg.c
@@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_mfg_drv = {
.of_match_table = of_match_clk_mt6795_mfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
index ced6e310d694..733d0e2021fc 100644
--- a/drivers/clk/mediatek/clk-mt6795-mm.c
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -93,9 +93,9 @@ static struct platform_driver clk_mt6795_mm_drv = {
},
.id_table = clk_mt6795_mm_id_table,
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt6795_mm_drv);
-MODULE_DESCRIPTION("MediaTek MT6795 MultiMedia clocks driver");
+MODULE_DESCRIPTION("MediaTek MT6795 MMSYS clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
index 3f6bea418a5a..d48240eb2a67 100644
--- a/drivers/clk/mediatek/clk-mt6795-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -153,7 +153,7 @@ static struct platform_driver clk_mt6795_pericfg_drv = {
.of_match_table = of_match_clk_mt6795_pericfg,
},
.probe = clk_mt6795_pericfg_probe,
- .remove_new = clk_mt6795_pericfg_remove,
+ .remove = clk_mt6795_pericfg_remove,
};
module_platform_driver(clk_mt6795_pericfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
index be595853a925..9c6d63a80b19 100644
--- a/drivers/clk/mediatek/clk-mt6795-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -547,7 +547,7 @@ static struct platform_driver clk_mt6795_topckgen_drv = {
.of_match_table = of_match_clk_mt6795_topckgen,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_topckgen_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
index 9e91d6f7f5bf..f2968f859dca 100644
--- a/drivers/clk/mediatek/clk-mt6795-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_vdecsys);
static struct platform_driver clk_mt6795_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6795-vdecsys",
.of_match_table = of_match_clk_mt6795_vdecsys,
diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c
index bd81e80b744f..2f8d48da1a85 100644
--- a/drivers/clk/mediatek/clk-mt6795-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c
@@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_vencsys_drv = {
.of_match_table = of_match_clk_mt6795_vencsys,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_vencsys_drv);
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index e1c1ee692a1d..338c69234f24 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_img);
static struct platform_driver clk_mt6797_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-img",
.of_match_table = of_match_clk_mt6797_img,
},
};
module_platform_driver(clk_mt6797_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6797 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 5b0a77530b62..ddb40b8a1a7d 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -93,11 +93,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt6797_mm_id_table);
static struct platform_driver clk_mt6797_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6797-mm",
},
.id_table = clk_mt6797_mm_id_table,
};
module_platform_driver(clk_mt6797_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6797 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index 0ed6710ab88e..d832f48123f5 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -54,11 +54,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_vdec);
static struct platform_driver clk_mt6797_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-vdec",
.of_match_table = of_match_clk_mt6797_vdec,
},
};
module_platform_driver(clk_mt6797_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6797 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index 93d1da7423fe..fd4446f4a9d7 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -45,11 +45,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_venc);
static struct platform_driver clk_mt6797_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-venc",
.of_match_table = of_match_clk_mt6797_venc,
},
};
module_platform_driver(clk_mt6797_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6797 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index f12d4e9ff0bb..fb59e71af58e 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -708,4 +708,6 @@ static int __init clk_mt6797_init(void)
}
arch_initcall(clk_mt6797_init);
+
+MODULE_DESCRIPTION("MediaTek MT6797 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
index 1b8f859b6b6c..2350592d9a93 100644
--- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
@@ -137,7 +137,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_apmixed);
static struct platform_driver clk_mt7622_apmixed_drv = {
.probe = clk_mt7622_apmixed_probe,
- .remove_new = clk_mt7622_apmixed_remove,
+ .remove = clk_mt7622_apmixed_remove,
.driver = {
.name = "clk-mt7622-apmixed",
.of_match_table = of_match_clk_mt7622_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 42bade71e2f8..a4ea5e20efa2 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -75,6 +75,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
+ GATE_AUDIO1(CLK_AUDIO_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
@@ -149,11 +150,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_aud);
static struct platform_driver clk_mt7622_aud_drv = {
.probe = clk_mt7622_aud_probe,
- .remove_new = clk_mt7622_aud_remove,
+ .remove = clk_mt7622_aud_remove,
.driver = {
.name = "clk-mt7622-aud",
.of_match_table = of_match_clk_mt7622_aud,
},
};
module_platform_driver(clk_mt7622_aud_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7622 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index 62fdf127e77d..1c1033a92c46 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -79,11 +79,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_eth);
static struct platform_driver clk_mt7622_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-eth",
.of_match_table = of_match_clk_mt7622_eth,
},
};
module_platform_driver(clk_mt7622_eth_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7622 Ethernet clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index b4a520ce362c..5bcfe12c4fd0 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -91,11 +91,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_hif);
static struct platform_driver clk_mt7622_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-hif",
.of_match_table = of_match_clk_mt7622_hif,
},
};
module_platform_driver(clk_mt7622_hif_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7622 HIF clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7622-infracfg.c b/drivers/clk/mediatek/clk-mt7622-infracfg.c
index 6bc911cb29a6..cfdf3b07c3e0 100644
--- a/drivers/clk/mediatek/clk-mt7622-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7622-infracfg.c
@@ -118,7 +118,7 @@ static struct platform_driver clk_mt7622_infracfg_drv = {
.of_match_table = of_match_clk_mt7622_infracfg,
},
.probe = clk_mt7622_infracfg_probe,
- .remove_new = clk_mt7622_infracfg_remove,
+ .remove = clk_mt7622_infracfg_remove,
};
module_platform_driver(clk_mt7622_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 27781a62a131..f62b03abab4f 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -524,7 +524,7 @@ static struct platform_driver clk_mt7622_drv = {
.of_match_table = of_match_clk_mt7622,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7622_drv)
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index 1bfedc988cfe..29ea93abaac5 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -157,4 +157,6 @@ static struct platform_driver clk_mt7629_eth_drv = {
};
builtin_platform_driver(clk_mt7629_eth_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7629 Ethernet clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index dd1be946a8f0..3fdc2d7d4274 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -86,11 +86,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7629_hif);
static struct platform_driver clk_mt7629_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7629-hif",
.of_match_table = of_match_clk_mt7629_hif,
},
};
module_platform_driver(clk_mt7629_hif_drv);
+
+MODULE_DESCRIPTION("MediaTek MT2701 HIF clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index b8a1f01bc974..baf94e7bea37 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -698,4 +698,6 @@ static int clk_mt7629_init(void)
}
arch_initcall(clk_mt7629_init);
+
+MODULE_DESCRIPTION("MediaTek MT7629 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-apmixed.c b/drivers/clk/mediatek/clk-mt7981-apmixed.c
index 98739877f430..e8211eb4e09e 100644
--- a/drivers/clk/mediatek/clk-mt7981-apmixed.c
+++ b/drivers/clk/mediatek/clk-mt7981-apmixed.c
@@ -99,4 +99,6 @@ static struct platform_driver clk_mt7981_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt7981_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7981 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-eth.c b/drivers/clk/mediatek/clk-mt7981-eth.c
index fb1a8c9242ef..906aec9ddff5 100644
--- a/drivers/clk/mediatek/clk-mt7981-eth.c
+++ b/drivers/clk/mediatek/clk-mt7981-eth.c
@@ -107,11 +107,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_eth);
static struct platform_driver clk_mt7981_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-eth",
.of_match_table = of_match_clk_mt7981_eth,
},
};
module_platform_driver(clk_mt7981_eth_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7981 Ethernet clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-infracfg.c b/drivers/clk/mediatek/clk-mt7981-infracfg.c
index 18bf4e8be457..0487b6bb80ae 100644
--- a/drivers/clk/mediatek/clk-mt7981-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7981-infracfg.c
@@ -197,11 +197,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_infracfg);
static struct platform_driver clk_mt7981_infracfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-infracfg",
.of_match_table = of_match_clk_mt7981_infracfg,
},
};
module_platform_driver(clk_mt7981_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7981 infracfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
index 493aa11d3a17..1943f11e47c1 100644
--- a/drivers/clk/mediatek/clk-mt7981-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
@@ -413,11 +413,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_topckgen);
static struct platform_driver clk_mt7981_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-topckgen",
.of_match_table = of_match_clk_mt7981_topckgen,
},
};
module_platform_driver(clk_mt7981_topckgen_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7981 top clock generators driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c
index 7f807fbdfcb9..93751abe6be8 100644
--- a/drivers/clk/mediatek/clk-mt7986-apmixed.c
+++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c
@@ -97,4 +97,6 @@ static struct platform_driver clk_mt7986_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt7986_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7986 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index 7ab78e0f49a1..4514d42c0829 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -92,7 +92,7 @@ static struct platform_driver clk_mt7986_eth_drv = {
.of_match_table = of_match_clk_mt7986_eth,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7986_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index cb8ab3e53abf..732c65e616de 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -177,7 +177,7 @@ static struct platform_driver clk_mt7986_infracfg_drv = {
.of_match_table = of_match_clk_mt7986_infracfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7986_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
index 84bc24511504..2dd30da306d9 100644
--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -306,11 +306,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_topckgen);
static struct platform_driver clk_mt7986_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7986-topckgen",
.of_match_table = of_match_clk_mt7986_topckgen,
},
};
module_platform_driver(clk_mt7986_topckgen_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7986 top clock generators driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-apmixed.c b/drivers/clk/mediatek/clk-mt7988-apmixed.c
index baf9564351a3..63d33a78cb48 100644
--- a/drivers/clk/mediatek/clk-mt7988-apmixed.c
+++ b/drivers/clk/mediatek/clk-mt7988-apmixed.c
@@ -111,4 +111,6 @@ static struct platform_driver clk_mt7988_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt7988_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7988 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-eth.c b/drivers/clk/mediatek/clk-mt7988-eth.c
index adf4a9d39b38..7d9463688be2 100644
--- a/drivers/clk/mediatek/clk-mt7988-eth.c
+++ b/drivers/clk/mediatek/clk-mt7988-eth.c
@@ -142,7 +142,7 @@ static struct platform_driver clk_mt7988_eth_drv = {
.of_match_table = of_match_clk_mt7988_eth,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index c8c023afe3e5..ef8267319d91 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -292,7 +292,9 @@ static struct platform_driver clk_mt7988_infracfg_drv = {
.of_match_table = of_match_clk_mt7988_infracfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7988 infracfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-topckgen.c b/drivers/clk/mediatek/clk-mt7988-topckgen.c
index 760f8e0d2f26..50e02cc7a214 100644
--- a/drivers/clk/mediatek/clk-mt7988-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7988-topckgen.c
@@ -315,11 +315,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_topckgen);
static struct platform_driver clk_mt7988_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7988-topckgen",
.of_match_table = of_match_clk_mt7988_topckgen,
},
};
module_platform_driver(clk_mt7988_topckgen_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7988 top clock generators driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-xfipll.c b/drivers/clk/mediatek/clk-mt7988-xfipll.c
index 9b9ca5471158..f941e4d3ef28 100644
--- a/drivers/clk/mediatek/clk-mt7988-xfipll.c
+++ b/drivers/clk/mediatek/clk-mt7988-xfipll.c
@@ -74,7 +74,7 @@ static struct platform_driver clk_mt7988_xfipll_drv = {
.of_match_table = of_match_clk_mt7988_xfipll,
},
.probe = clk_mt7988_xfipll_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_xfipll_drv);
diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
index 41bb2d2e2ea7..bdadc35c64cb 100644
--- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8135_apmixed);
static struct platform_driver clk_mt8135_apmixed_drv = {
.probe = clk_mt8135_apmixed_probe,
- .remove_new = clk_mt8135_apmixed_remove,
+ .remove = clk_mt8135_apmixed_remove,
.driver = {
.name = "clk-mt8135-apmixed",
.of_match_table = of_match_clk_mt8135_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 019af88d7f9c..084e48a554c2 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -558,7 +558,7 @@ static struct platform_driver clk_mt8135_drv = {
.of_match_table = of_match_clk_mt8135,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8135_drv);
diff --git a/drivers/clk/mediatek/clk-mt8167-apmixedsys.c b/drivers/clk/mediatek/clk-mt8167-apmixedsys.c
index fca41f50d6ba..adf576786696 100644
--- a/drivers/clk/mediatek/clk-mt8167-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8167-apmixedsys.c
@@ -142,4 +142,6 @@ static struct platform_driver clk_mt8167_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt8167_apmixed_drv)
+
+MODULE_DESCRIPTION("MediaTek MT8167 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index a5181f4eb34a..d6cff4bdf4cb 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -54,11 +54,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_audsys);
static struct platform_driver clk_mt8167_audsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-audsys",
.of_match_table = of_match_clk_mt8167_audsys,
},
};
module_platform_driver(clk_mt8167_audsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8167 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index 02fa52d8aabb..42d38ae94b69 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -46,11 +46,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_imgsys);
static struct platform_driver clk_mt8167_imgsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-imgsys",
.of_match_table = of_match_clk_mt8167_imgsys,
},
};
module_platform_driver(clk_mt8167_imgsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8167 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index a5b5ee7ac805..1ef37a3e6851 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -44,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_mfgcfg);
static struct platform_driver clk_mt8167_mfgcfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-mfgcfg",
.of_match_table = of_match_clk_mt8167_mfgcfg,
},
};
module_platform_driver(clk_mt8167_mfgcfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8167 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index 9b0c6b3343b9..cef66ee836f3 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -85,11 +85,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8167_mm_id_table);
static struct platform_driver clk_mt8167_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8167-mm",
},
.id_table = clk_mt8167_mm_id_table,
};
module_platform_driver(clk_mt8167_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8167 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index 1e1e76743180..e3769bc556a9 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -53,11 +53,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_vdec);
static struct platform_driver clk_mt8167_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-vdecsys",
.of_match_table = of_match_clk_mt8167_vdec,
},
};
module_platform_driver(clk_mt8167_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8167 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
index 270221c6e6e8..c64d918c37de 100644
--- a/drivers/clk/mediatek/clk-mt8167.c
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -887,11 +887,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167);
static struct platform_driver clk_mt8167_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167",
.of_match_table = of_match_clk_mt8167,
},
};
module_platform_driver(clk_mt8167_drv);
+
+MODULE_DESCRIPTION("MediaTek MTì8167 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
index 6cab483b8e1e..95385bb67d55 100644
--- a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
@@ -207,7 +207,7 @@ static void clk_mt8173_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8173_apmixed_drv = {
.probe = clk_mt8173_apmixed_probe,
- .remove_new = clk_mt8173_apmixed_remove,
+ .remove = clk_mt8173_apmixed_remove,
.driver = {
.name = "clk-mt8173-apmixed",
.of_match_table = of_match_clk_mt8173_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8173-img.c b/drivers/clk/mediatek/clk-mt8173-img.c
index 1011b9ab3dad..6db2b9ab2bc9 100644
--- a/drivers/clk/mediatek/clk-mt8173-img.c
+++ b/drivers/clk/mediatek/clk-mt8173-img.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_imgsys);
static struct platform_driver clk_mt8173_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8173-imgsys",
.of_match_table = of_match_clk_mt8173_imgsys,
diff --git a/drivers/clk/mediatek/clk-mt8173-infracfg.c b/drivers/clk/mediatek/clk-mt8173-infracfg.c
index 2f2f074e231a..fa2d1d557e04 100644
--- a/drivers/clk/mediatek/clk-mt8173-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt8173-infracfg.c
@@ -98,7 +98,17 @@ CLK_OF_DECLARE_DRIVER(mtk_infrasys, "mediatek,mt8173-infracfg",
static int clk_mt8173_infracfg_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- int r;
+ int r, i;
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!infra_clk_data)
+ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR_CLK; i++)
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+ infra_clk_data->hws[i] = ERR_PTR(-ENOENT);
+ }
r = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
ARRAY_SIZE(infra_gates), infra_clk_data);
@@ -146,7 +156,7 @@ static struct platform_driver clk_mt8173_infracfg_drv = {
.of_match_table = of_match_clk_mt8173_infracfg,
},
.probe = clk_mt8173_infracfg_probe,
- .remove_new = clk_mt8173_infracfg_remove,
+ .remove = clk_mt8173_infracfg_remove,
};
module_platform_driver(clk_mt8173_infracfg_drv);
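
The probe change above restores the second half of a two-stage registration scheme. An early CLK_OF_DECLARE_DRIVER pass pre-allocates the clock table and fills every slot with ERR_PTR(-EPROBE_DEFER), so consumers of clocks that only the platform driver can register retry instead of failing outright. When the platform driver finally probes, any slot still holding -EPROBE_DEFER is downgraded to -ENOENT before the gates are registered, since nothing else will provide those clocks afterwards. For reference, the early-init counterpart looks roughly like this (a sketch of the mt8173-style pattern, not a quote of the file):

static void __init mtk_infrasys_init_early(struct device_node *node)
{
	int i;

	if (!infra_clk_data) {
		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
		if (!infra_clk_data)
			return;

		/* Make consumers of not-yet-registered clocks defer. */
		for (i = 0; i < CLK_INFRA_NR_CLK; i++)
			infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
	}

	of_clk_add_hw_provider(node, of_clk_hw_onecell_get, infra_clk_data);
}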
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index fd903bee328f..26d27250b914 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -106,7 +106,7 @@ static struct platform_driver clk_mt8173_mm_drv = {
},
.id_table = clk_mt8173_mm_id_table,
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt8173_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-pericfg.c b/drivers/clk/mediatek/clk-mt8173-pericfg.c
index 783efed3f254..bebda74d0f43 100644
--- a/drivers/clk/mediatek/clk-mt8173-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt8173-pericfg.c
@@ -115,7 +115,7 @@ static struct platform_driver clk_mt8173_pericfg_drv = {
.of_match_table = of_match_clk_mt8173_pericfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_pericfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-topckgen.c b/drivers/clk/mediatek/clk-mt8173-topckgen.c
index 6bb7ffd74487..42c37541cebb 100644
--- a/drivers/clk/mediatek/clk-mt8173-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8173-topckgen.c
@@ -646,7 +646,7 @@ static struct platform_driver clk_mt8173_topckgen_drv = {
.of_match_table = of_match_clk_mt8173_topckgen,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_topckgen_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-vdecsys.c b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
index 011e3812156f..625ca0b09cc2 100644
--- a/drivers/clk/mediatek/clk-mt8173-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_vdecsys);
static struct platform_driver clk_mt8173_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8173-vdecsys",
.of_match_table = of_match_clk_mt8173_vdecsys,
diff --git a/drivers/clk/mediatek/clk-mt8173-vencsys.c b/drivers/clk/mediatek/clk-mt8173-vencsys.c
index 1bf84ae6a0bc..87755dd1a337 100644
--- a/drivers/clk/mediatek/clk-mt8173-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-vencsys.c
@@ -57,7 +57,7 @@ static struct platform_driver clk_mt8173_vencsys_drv = {
.of_match_table = of_match_clk_mt8173_vencsys,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_vencsys_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-apmixedsys.c b/drivers/clk/mediatek/clk-mt8183-apmixedsys.c
index 2b261c0e2b61..551adbfd7ac9 100644
--- a/drivers/clk/mediatek/clk-mt8183-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8183-apmixedsys.c
@@ -192,4 +192,6 @@ static struct platform_driver clk_mt8183_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt8183_apmixed_drv)
+
+MODULE_DESCRIPTION("MediaTek MT8183 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
index 716b26825ef0..011d329ad30e 100644
--- a/drivers/clk/mediatek/clk-mt8183-audio.c
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -101,11 +101,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_audio);
static struct platform_driver clk_mt8183_audio_drv = {
.probe = clk_mt8183_audio_probe,
- .remove_new = clk_mt8183_audio_remove,
+ .remove = clk_mt8183_audio_remove,
.driver = {
.name = "clk-mt8183-audio",
.of_match_table = of_match_clk_mt8183_audio,
},
};
module_platform_driver(clk_mt8183_audio_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index b0f8e4242a63..c7642085f8de 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -51,11 +51,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_cam);
static struct platform_driver clk_mt8183_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-cam",
.of_match_table = of_match_clk_mt8183_cam,
},
};
module_platform_driver(clk_mt8183_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index 6e177d2e8872..ee92459c74ca 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -51,11 +51,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_img);
static struct platform_driver clk_mt8183_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-img",
.of_match_table = of_match_clk_mt8183_img,
},
};
module_platform_driver(clk_mt8183_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index 0b61c7af8aea..6831747f123b 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -44,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core0);
static struct platform_driver clk_mt8183_ipu_core0_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core0",
.of_match_table = of_match_clk_mt8183_ipu_core0,
},
};
module_platform_driver(clk_mt8183_ipu_core0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Pri. Image Processing Unit clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index 544b1ca0e1c5..ecf434432e7b 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -44,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core1);
static struct platform_driver clk_mt8183_ipu_core1_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core1",
.of_match_table = of_match_clk_mt8183_ipu_core1,
},
};
module_platform_driver(clk_mt8183_ipu_core1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Sec. Image Processing Unit clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 7f53674f393c..c1a770ba3245 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -42,11 +42,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_adl);
static struct platform_driver clk_mt8183_ipu_adl_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_adl",
.of_match_table = of_match_clk_mt8183_ipu_adl,
},
};
module_platform_driver(clk_mt8183_ipu_adl_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Image Processing Unit ADL driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index fb03ad2d8f6a..f0e72e6edb7a 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -111,11 +111,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_conn);
static struct platform_driver clk_mt8183_ipu_conn_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_conn",
.of_match_table = of_match_clk_mt8183_ipu_conn,
},
};
module_platform_driver(clk_mt8183_ipu_conn_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Image Processing Unit Bus clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index ba504e19d420..be44889783ff 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
static const struct mtk_clk_desc mfg_desc = {
.clks = mfg_clks,
.num_clks = ARRAY_SIZE(mfg_clks),
+ .need_runtime_pm = true,
};
static const struct of_device_id of_match_clk_mt8183_mfg[] = {
@@ -43,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_mfg);
static struct platform_driver clk_mt8183_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-mfg",
.of_match_table = of_match_clk_mt8183_mfg,
},
};
module_platform_driver(clk_mt8183_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 8aaddcfee568..0f132f05fa8b 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -95,11 +95,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8183_mm_id_table);
static struct platform_driver clk_mt8183_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8183-mm",
},
.id_table = clk_mt8183_mm_id_table,
};
module_platform_driver(clk_mt8183_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index 8c99ae89834f..43bf34077b16 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -55,11 +55,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_vdec);
static struct platform_driver clk_mt8183_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-vdec",
.of_match_table = of_match_clk_mt8183_vdec,
},
};
module_platform_driver(clk_mt8183_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index a8e0220902ae..c3d99b3b8ff7 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -47,11 +47,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_venc);
static struct platform_driver clk_mt8183_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-venc",
.of_match_table = of_match_clk_mt8183_venc,
},
};
module_platform_driver(clk_mt8183_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8183 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 934d5a15acfc..aa7cc7709b2d 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -899,11 +899,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183);
static struct platform_driver clk_mt8183_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183",
.of_match_table = of_match_clk_mt8183,
},
};
module_platform_driver(clk_mt8183_drv)
+
+MODULE_DESCRIPTION("MediaTek MT8183 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
index fff64a8fd557..4b2b16578232 100644
--- a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
@@ -185,11 +185,13 @@ static void clk_mt8186_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8186_apmixed_drv = {
.probe = clk_mt8186_apmixed_probe,
- .remove_new = clk_mt8186_apmixed_remove,
+ .remove = clk_mt8186_apmixed_remove,
.driver = {
.name = "clk-mt8186-apmixed",
.of_match_table = of_match_clk_mt8186_apmixed,
},
};
module_platform_driver(clk_mt8186_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c
index effd2900d2e8..2ddd5f90377f 100644
--- a/drivers/clk/mediatek/clk-mt8186-cam.c
+++ b/drivers/clk/mediatek/clk-mt8186-cam.c
@@ -82,11 +82,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_cam);
static struct platform_driver clk_mt8186_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-cam",
.of_match_table = of_match_clk_mt8186_cam,
},
};
module_platform_driver(clk_mt8186_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c
index 71b0571e6351..5e466e1f5f44 100644
--- a/drivers/clk/mediatek/clk-mt8186-img.c
+++ b/drivers/clk/mediatek/clk-mt8186-img.c
@@ -60,11 +60,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_img);
static struct platform_driver clk_mt8186_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-img",
.of_match_table = of_match_clk_mt8186_img,
},
};
module_platform_driver(clk_mt8186_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
index 640ccb553274..75abb871044c 100644
--- a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
@@ -59,11 +59,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_imp_iic_wrap);
static struct platform_driver clk_mt8186_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-imp_iic_wrap",
.of_match_table = of_match_clk_mt8186_imp_iic_wrap,
},
};
module_platform_driver(clk_mt8186_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 I2C Wrapper clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index 837304cd0ed7..8d9d86a510ff 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -231,11 +231,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_infra_ao);
static struct platform_driver clk_mt8186_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-infra-ao",
.of_match_table = of_match_clk_mt8186_infra_ao,
},
};
module_platform_driver(clk_mt8186_infra_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 infracfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c
index 60739e225cb6..f66a0aeaa6b3 100644
--- a/drivers/clk/mediatek/clk-mt8186-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8186-ipe.c
@@ -47,11 +47,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_ipe);
static struct platform_driver clk_mt8186_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-ipe",
.of_match_table = of_match_clk_mt8186_ipe,
},
};
module_platform_driver(clk_mt8186_ipe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 Image Processing Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
index eb54ccb77b74..d1640e4dc2ad 100644
--- a/drivers/clk/mediatek/clk-mt8186-mcu.c
+++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
@@ -60,7 +60,7 @@ static struct platform_driver clk_mt8186_mcu_drv = {
.of_match_table = of_match_clk_mt8186_mcu,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8186_mcu_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c
index 9a335f2285ce..01561cf902c4 100644
--- a/drivers/clk/mediatek/clk-mt8186-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8186-mdp.c
@@ -72,11 +72,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mdp);
static struct platform_driver clk_mt8186_mdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-mdp",
.of_match_table = of_match_clk_mt8186_mdp,
},
};
module_platform_driver(clk_mt8186_mdp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 Multimedia Data Path clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c
index 7618dad9e0e0..3f21b1f222e1 100644
--- a/drivers/clk/mediatek/clk-mt8186-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8186-mfg.c
@@ -41,11 +41,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mfg);
static struct platform_driver clk_mt8186_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-mfg",
.of_match_table = of_match_clk_mt8186_mfg,
},
};
module_platform_driver(clk_mt8186_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
index 44ed504a8069..fc8488c44866 100644
--- a/drivers/clk/mediatek/clk-mt8186-mm.c
+++ b/drivers/clk/mediatek/clk-mt8186-mm.c
@@ -71,11 +71,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8186_mm_id_table);
static struct platform_driver clk_mt8186_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8186-mm",
},
.id_table = clk_mt8186_mm_id_table,
};
module_platform_driver(clk_mt8186_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
index 8e385d6bfef2..14f1cbdbbd13 100644
--- a/drivers/clk/mediatek/clk-mt8186-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
@@ -725,11 +725,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_topck);
static struct platform_driver clk_mt8186_topck_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-topck",
.of_match_table = of_match_clk_mt8186_topck,
},
};
module_platform_driver(clk_mt8186_topck_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 top clock generators driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c
index 0b814e8e107f..522b8c952969 100644
--- a/drivers/clk/mediatek/clk-mt8186-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8186-vdec.c
@@ -80,11 +80,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_vdec);
static struct platform_driver clk_mt8186_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-vdec",
.of_match_table = of_match_clk_mt8186_vdec,
},
};
module_platform_driver(clk_mt8186_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c
index 9493e51af3e2..c0c98bc75112 100644
--- a/drivers/clk/mediatek/clk-mt8186-venc.c
+++ b/drivers/clk/mediatek/clk-mt8186-venc.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_venc);
static struct platform_driver clk_mt8186_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-venc",
.of_match_table = of_match_clk_mt8186_venc,
},
};
module_platform_driver(clk_mt8186_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c
index a0174eabef4a..babd7b2778c2 100644
--- a/drivers/clk/mediatek/clk-mt8186-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8186-wpe.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_wpe);
static struct platform_driver clk_mt8186_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-wpe",
.of_match_table = of_match_clk_mt8186_wpe,
},
};
module_platform_driver(clk_mt8186_wpe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 Warp Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
index 1dc3d2bad42d..dcde2187d24a 100644
--- a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
+++ b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
@@ -40,11 +40,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_adsp_audio26m);
static struct platform_driver clk_mt8188_adsp_audio26m_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-adsp_audio26m",
.of_match_table = of_match_clk_mt8188_adsp_audio26m,
},
};
module_platform_driver(clk_mt8188_adsp_audio26m_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 AudioDSP clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
index 41ab4d6896a4..21d7a9a2ab1a 100644
--- a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
@@ -145,11 +145,13 @@ static void clk_mt8188_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8188_apmixed_drv = {
.probe = clk_mt8188_apmixed_probe,
- .remove_new = clk_mt8188_apmixed_remove,
+ .remove = clk_mt8188_apmixed_remove,
.driver = {
.name = "clk-mt8188-apmixed",
.of_match_table = of_match_clk_mt8188_apmixed,
},
};
module_platform_driver(clk_mt8188_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c
index f78f564aa27e..9b029fdd584e 100644
--- a/drivers/clk/mediatek/clk-mt8188-cam.c
+++ b/drivers/clk/mediatek/clk-mt8188-cam.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
#define GATE_CAM(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define CAM_SYS_SMI_LARB_RST_OFF (0xA0)
+
static const struct mtk_gate cam_main_clks[] = {
GATE_CAM(CLK_CAM_MAIN_LARB13, "cam_main_larb13", "top_cam", 0),
GATE_CAM(CLK_CAM_MAIN_LARB14, "cam_main_larb14", "top_cam", 1),
@@ -72,6 +74,17 @@ static const struct mtk_gate cam_yuvb_clks[] = {
GATE_CAM(CLK_CAM_YUVB_CAMTG, "cam_yuvb_camtg", "top_cam", 2),
};
+/* Reset for SMI larb 16a/16b/17a/17b */
+static u16 cam_sys_rst_ofs[] = {
+ CAM_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc cam_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = cam_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(cam_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc cam_main_desc = {
.clks = cam_main_clks,
.num_clks = ARRAY_SIZE(cam_main_clks),
@@ -80,21 +93,25 @@ static const struct mtk_clk_desc cam_main_desc = {
static const struct mtk_clk_desc cam_rawa_desc = {
.clks = cam_rawa_clks,
.num_clks = ARRAY_SIZE(cam_rawa_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_rawb_desc = {
.clks = cam_rawb_clks,
.num_clks = ARRAY_SIZE(cam_rawb_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_yuva_desc = {
.clks = cam_yuva_clks,
.num_clks = ARRAY_SIZE(cam_yuva_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_yuvb_desc = {
.clks = cam_yuvb_clks,
.num_clks = ARRAY_SIZE(cam_yuvb_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_cam[] = {
@@ -109,12 +126,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_cam);
static struct platform_driver clk_mt8188_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-cam",
.of_match_table = of_match_clk_mt8188_cam,
},
};
-
module_platform_driver(clk_mt8188_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-ccu.c b/drivers/clk/mediatek/clk-mt8188-ccu.c
index 428dcc4818c2..1566fc437ea3 100644
--- a/drivers/clk/mediatek/clk-mt8188-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8188-ccu.c
@@ -39,12 +39,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ccu);
static struct platform_driver clk_mt8188_ccu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-ccu",
.of_match_table = of_match_clk_mt8188_ccu,
},
};
-
module_platform_driver(clk_mt8188_ccu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Camera Control Unit clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c
index 76c64a8992a4..d44bfbd8308a 100644
--- a/drivers/clk/mediatek/clk-mt8188-img.c
+++ b/drivers/clk/mediatek/clk-mt8188-img.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs imgsys_cg_regs = {
#define GATE_IMGSYS(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &imgsys_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define IMG_SYS_SMI_LARB_RST_OFF (0xC)
+
static const struct mtk_gate imgsys_main_clks[] = {
GATE_IMGSYS(CLK_IMGSYS_MAIN_LARB9, "imgsys_main_larb9", "top_img", 0),
GATE_IMGSYS(CLK_IMGSYS_MAIN_TRAW0, "imgsys_main_traw0", "top_img", 1),
@@ -58,6 +60,17 @@ static const struct mtk_gate imgsys1_dip_nr_clks[] = {
GATE_IMGSYS(CLK_IMGSYS1_DIP_NR_DIP_NR, "imgsys1_dip_nr_dip_nr", "top_img", 1),
};
+/* Reset for SMI larb 10/11a/11b/11c/15 */
+static u16 img_sys_rst_ofs[] = {
+ IMG_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc img_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = img_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(img_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc imgsys_main_desc = {
.clks = imgsys_main_clks,
.num_clks = ARRAY_SIZE(imgsys_main_clks),
@@ -66,26 +79,31 @@ static const struct mtk_clk_desc imgsys_main_desc = {
static const struct mtk_clk_desc imgsys_wpe1_desc = {
.clks = imgsys_wpe1_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe1_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys_wpe2_desc = {
.clks = imgsys_wpe2_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe2_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys_wpe3_desc = {
.clks = imgsys_wpe3_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe3_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys1_dip_top_desc = {
.clks = imgsys1_dip_top_clks,
.num_clks = ARRAY_SIZE(imgsys1_dip_top_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys1_dip_nr_desc = {
.clks = imgsys1_dip_nr_clks,
.num_clks = ARRAY_SIZE(imgsys1_dip_nr_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_imgsys_main[] = {
@@ -101,12 +119,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imgsys_main);
static struct platform_driver clk_mt8188_imgsys_main_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-imgsys_main",
.of_match_table = of_match_clk_mt8188_imgsys_main,
},
};
-
module_platform_driver(clk_mt8188_imgsys_main_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
index 66946784cdba..14a4b575b583 100644
--- a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
@@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imp_iic_wrap);
static struct platform_driver clk_mt8188_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-imp_iic_wrap",
.of_match_table = of_match_clk_mt8188_imp_iic_wrap,
@@ -79,4 +79,6 @@ static struct platform_driver clk_mt8188_imp_iic_wrap_drv = {
};
module_platform_driver(clk_mt8188_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 I2C Wrapper clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-infra_ao.c b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
index f590178737cb..b9bc8fcc2ade 100644
--- a/drivers/clk/mediatek/clk-mt8188-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
@@ -213,11 +213,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_infra_ao);
static struct platform_driver clk_mt8188_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-infra_ao",
.of_match_table = of_match_clk_mt8188_infra_ao,
},
};
module_platform_driver(clk_mt8188_infra_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 infracfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c
index 54fe6b689b47..70a011c1f9ce 100644
--- a/drivers/clk/mediatek/clk-mt8188-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8188-ipe.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs ipe_cg_regs = {
#define GATE_IPE(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define IPE_SYS_SMI_LARB_RST_OFF (0xC)
+
static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 0),
GATE_IPE(CLK_IPE_FDVT, "ipe_fdvt", "top_ipe", 1),
@@ -28,9 +30,21 @@ static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_SMI_LARB12, "ipe_smi_larb12", "top_ipe", 4),
};
+/* Reset for SMI larb 12 */
+static u16 ipe_sys_rst_ofs[] = {
+ IPE_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc ipe_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = ipe_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ipe_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc ipe_desc = {
.clks = ipe_clks,
.num_clks = ARRAY_SIZE(ipe_clks),
+ .rst_desc = &ipe_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_ipe[] = {
@@ -41,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ipe);
static struct platform_driver clk_mt8188_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-ipe",
.of_match_table = of_match_clk_mt8188_ipe,
@@ -49,4 +63,6 @@ static struct platform_driver clk_mt8188_ipe_drv = {
};
module_platform_driver(clk_mt8188_ipe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Image Processing Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-mfg.c b/drivers/clk/mediatek/clk-mt8188-mfg.c
index 1c8ef4c6820f..2ddfb1a3de47 100644
--- a/drivers/clk/mediatek/clk-mt8188-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8188-mfg.c
@@ -38,7 +38,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_mfgcfg);
static struct platform_driver clk_mt8188_mfgcfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-mfgcfg",
.of_match_table = of_match_clk_mt8188_mfgcfg,
@@ -46,4 +46,6 @@ static struct platform_driver clk_mt8188_mfgcfg_drv = {
};
module_platform_driver(clk_mt8188_mfgcfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8186 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-peri_ao.c b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
index a8214e42b8e5..639865335fc8 100644
--- a/drivers/clk/mediatek/clk-mt8188-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
@@ -49,11 +49,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_peri_ao);
static struct platform_driver clk_mt8188_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-peri_ao",
.of_match_table = of_match_clk_mt8188_peri_ao,
},
};
module_platform_driver(clk_mt8188_peri_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 pericfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
index f7ec599b20af..6b07abe9a8f5 100644
--- a/drivers/clk/mediatek/clk-mt8188-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -342,11 +342,14 @@ static const char * const dsp7_parents[] = {
"univpll_d3"
};
+/*
+ * MFG can also be parented to "univpll_d6" and "univpll_d7":
+ * these have been removed from the parents list to let us
+ * achieve GPU DVFS without any special clock handlers.
+ */
static const char * const mfg_core_tmp_parents[] = {
"clk26m",
- "mainpll_d5_d2",
- "univpll_d6",
- "univpll_d7"
+ "mainpll_d5_d2"
};
static const char * const camtg_parents[] = {
@@ -1347,11 +1350,13 @@ static void clk_mt8188_topck_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8188_topck_drv = {
.probe = clk_mt8188_topck_probe,
- .remove_new = clk_mt8188_topck_remove,
+ .remove = clk_mt8188_topck_remove,
.driver = {
.name = "clk-mt8188-topck",
.of_match_table = of_match_clk_mt8188_topck,
},
};
module_platform_driver(clk_mt8188_topck_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 top clock generators driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vdec.c b/drivers/clk/mediatek/clk-mt8188-vdec.c
index db5855d133ac..f48f0716d7c2 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdec.c
@@ -81,7 +81,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_vdec);
static struct platform_driver clk_mt8188_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-vdec",
.of_match_table = of_match_clk_mt8188_vdec,
@@ -89,4 +89,6 @@ static struct platform_driver clk_mt8188_vdec_drv = {
};
module_platform_driver(clk_mt8188_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo0.c b/drivers/clk/mediatek/clk-mt8188-vdo0.c
index d252e198678c..017d6662589b 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo0.c
@@ -97,11 +97,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo0_id_table);
static struct platform_driver clk_mt8188_vdo0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vdo0",
},
.id_table = clk_mt8188_vdo0_id_table,
};
module_platform_driver(clk_mt8188_vdo0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Video Output 0 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c
index 7b72d54086db..f715d45e545e 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c
@@ -43,6 +43,12 @@ static const struct mtk_gate_regs vdo1_4_cg_regs = {
.sta_ofs = 0x140,
};
+static const struct mtk_gate_regs vdo1_5_cg_regs = {
+ .set_ofs = 0x400,
+ .clr_ofs = 0x400,
+ .sta_ofs = 0x400,
+};
+
#define GATE_VDO1_0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
@@ -62,6 +68,9 @@ static const struct mtk_gate_regs vdo1_4_cg_regs = {
#define GATE_VDO1_4(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
static const struct mtk_gate vdo1_clks[] = {
/* VDO1_0 */
GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
@@ -129,6 +138,8 @@ static const struct mtk_gate vdo1_clks[] = {
GATE_VDO1_3(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf_ck", "top_vpp", 17),
/* VDO1_4 */
GATE_VDO1_4(CLK_VDO1_26M_SLOW, "vdo1_26m_slow_ck", "clk26m", 8),
+ /* VDO1_5 */
+ GATE_VDO1_5(CLK_VDO1_DPI1_HDMI, "vdo1_dpi1_hdmi", "hdmi_txpll", 0),
};
static const struct mtk_clk_desc vdo1_desc = {
@@ -144,11 +155,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo1_id_table);
static struct platform_driver clk_mt8188_vdo1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vdo1",
},
.id_table = clk_mt8188_vdo1_id_table,
};
module_platform_driver(clk_mt8188_vdo1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Video Output 1 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-venc.c b/drivers/clk/mediatek/clk-mt8188-venc.c
index 5b1713908ed2..01e971545506 100644
--- a/drivers/clk/mediatek/clk-mt8188-venc.c
+++ b/drivers/clk/mediatek/clk-mt8188-venc.c
@@ -45,12 +45,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_venc1);
static struct platform_driver clk_mt8188_venc1_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-venc1",
.of_match_table = of_match_clk_mt8188_venc1,
},
};
-
module_platform_driver(clk_mt8188_venc1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp0.c b/drivers/clk/mediatek/clk-mt8188-vpp0.c
index e7b02b26fefb..cd2579b7b9c3 100644
--- a/drivers/clk/mediatek/clk-mt8188-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8188-vpp0.c
@@ -104,11 +104,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp0_id_table);
static struct platform_driver clk_mt8188_vpp0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vpp0",
},
.id_table = clk_mt8188_vpp0_id_table,
};
module_platform_driver(clk_mt8188_vpp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Video Processing Pipe 0 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp1.c b/drivers/clk/mediatek/clk-mt8188-vpp1.c
index e8f0f7eca097..0e1bd8306e8a 100644
--- a/drivers/clk/mediatek/clk-mt8188-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vpp1.c
@@ -99,11 +99,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp1_id_table);
static struct platform_driver clk_mt8188_vpp1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vpp1",
},
.id_table = clk_mt8188_vpp1_id_table,
};
module_platform_driver(clk_mt8188_vpp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Video Processing Pipe 1 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-wpe.c b/drivers/clk/mediatek/clk-mt8188-wpe.c
index f394ec049872..d709bb1ee1d6 100644
--- a/drivers/clk/mediatek/clk-mt8188-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8188-wpe.c
@@ -94,12 +94,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_wpe);
static struct platform_driver clk_mt8188_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-wpe",
.of_match_table = of_match_clk_mt8188_wpe,
},
};
-
module_platform_driver(clk_mt8188_wpe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8188 Warp Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
index 3590932acc63..0b66a27e4d5a 100644
--- a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
@@ -206,7 +206,7 @@ static struct platform_driver clk_mt8192_apmixed_drv = {
.of_match_table = of_match_clk_mt8192_apmixed,
},
.probe = clk_mt8192_apmixed_probe,
- .remove_new = clk_mt8192_apmixed_remove,
+ .remove = clk_mt8192_apmixed_remove,
};
module_platform_driver(clk_mt8192_apmixed_drv);
MODULE_DESCRIPTION("MediaTek MT8192 apmixed clocks driver");
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
index 5bce67bf701d..f3ebf8713fbb 100644
--- a/drivers/clk/mediatek/clk-mt8192-aud.c
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -111,11 +111,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_aud);
static struct platform_driver clk_mt8192_aud_drv = {
.probe = clk_mt8192_aud_probe,
- .remove_new = clk_mt8192_aud_remove,
+ .remove = clk_mt8192_aud_remove,
.driver = {
.name = "clk-mt8192-aud",
.of_match_table = of_match_clk_mt8192_aud,
},
};
module_platform_driver(clk_mt8192_aud_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 audio clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
index a2c0142273a8..891d2f88d9cf 100644
--- a/drivers/clk/mediatek/clk-mt8192-cam.c
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -99,11 +99,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_cam);
static struct platform_driver clk_mt8192_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-cam",
.of_match_table = of_match_clk_mt8192_cam,
},
};
module_platform_driver(clk_mt8192_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
index ee52b5b748e7..c08e831125a5 100644
--- a/drivers/clk/mediatek/clk-mt8192-img.c
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -62,11 +62,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_img);
static struct platform_driver clk_mt8192_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-img",
.of_match_table = of_match_clk_mt8192_img,
},
};
module_platform_driver(clk_mt8192_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
index a97b6e8d97ac..0f9530d9263c 100644
--- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -111,11 +111,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_imp_iic_wrap);
static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-imp_iic_wrap",
.of_match_table = of_match_clk_mt8192_imp_iic_wrap,
},
};
module_platform_driver(clk_mt8192_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 I2C Wrapper clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
index 56ce58ac01ad..c932b8b20edc 100644
--- a/drivers/clk/mediatek/clk-mt8192-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -49,11 +49,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_ipe);
static struct platform_driver clk_mt8192_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-ipe",
.of_match_table = of_match_clk_mt8192_ipe,
},
};
module_platform_driver(clk_mt8192_ipe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 Image Processing Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
index bad2f3d439f5..30334ebca864 100644
--- a/drivers/clk/mediatek/clk-mt8192-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -74,11 +74,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mdp);
static struct platform_driver clk_mt8192_mdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mdp",
.of_match_table = of_match_clk_mt8192_mdp,
},
};
module_platform_driver(clk_mt8192_mdp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 Multimedia Data Path clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
index 666b401e778c..9d176659e8a2 100644
--- a/drivers/clk/mediatek/clk-mt8192-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -44,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mfg);
static struct platform_driver clk_mt8192_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mfg",
.of_match_table = of_match_clk_mt8192_mfg,
},
};
module_platform_driver(clk_mt8192_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
index b294184c5183..bda4406e1304 100644
--- a/drivers/clk/mediatek/clk-mt8192-mm.c
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -93,11 +93,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8192_mm_id_table);
static struct platform_driver clk_mt8192_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8192-mm",
},
.id_table = clk_mt8192_mm_id_table,
};
module_platform_driver(clk_mt8192_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index 52d6f0babf12..04a66220f269 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -56,11 +56,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_msdc);
static struct platform_driver clk_mt8192_msdc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-msdc",
.of_match_table = of_match_clk_mt8192_msdc,
},
};
module_platform_driver(clk_mt8192_msdc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 MMC/SD Controller clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
index 72c8a8722b11..f9e4c16573e2 100644
--- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -42,11 +42,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_scp_adsp);
static struct platform_driver clk_mt8192_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-scp_adsp",
.of_match_table = of_match_clk_mt8192_scp_adsp,
},
};
module_platform_driver(clk_mt8192_scp_adsp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 SCP AudioDSP clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
index cc514068bcdb..9c10161807b2 100644
--- a/drivers/clk/mediatek/clk-mt8192-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -86,11 +86,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_vdec);
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-vdec",
.of_match_table = of_match_clk_mt8192_vdec,
},
};
module_platform_driver(clk_mt8192_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
index 9f6fff2dd753..0b01e2b7f036 100644
--- a/drivers/clk/mediatek/clk-mt8192-venc.c
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -45,11 +45,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_venc);
static struct platform_driver clk_mt8192_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-venc",
.of_match_table = of_match_clk_mt8192_venc,
},
};
module_platform_driver(clk_mt8192_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index e395c04632bc..50b43807c60c 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -1026,7 +1026,9 @@ static struct platform_driver clk_mt8192_drv = {
.of_match_table = of_match_clk_mt8192,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8192_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8192 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
index 44a4c85a67ef..282a3137dc89 100644
--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -223,11 +223,13 @@ static void clk_mt8195_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8195_apmixed_drv = {
.probe = clk_mt8195_apmixed_probe,
- .remove_new = clk_mt8195_apmixed_remove,
+ .remove = clk_mt8195_apmixed_remove,
.driver = {
.name = "clk-mt8195-apmixed",
.of_match_table = of_match_clk_mt8195_apmixed,
},
};
module_platform_driver(clk_mt8195_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
index 79762bc85cd7..8b45a3fad02f 100644
--- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -103,11 +103,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_apusys_pll);
static struct platform_driver clk_mt8195_apusys_pll_drv = {
.probe = clk_mt8195_apusys_pll_probe,
- .remove_new = clk_mt8195_apusys_pll_remove,
+ .remove = clk_mt8195_apusys_pll_remove,
.driver = {
.name = "clk-mt8195-apusys_pll",
.of_match_table = of_match_clk_mt8195_apusys_pll,
},
};
module_platform_driver(clk_mt8195_apusys_pll_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 AI Processing Unit PLL clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c
index 24cd6a2092b6..02cb20c2948b 100644
--- a/drivers/clk/mediatek/clk-mt8195-cam.c
+++ b/drivers/clk/mediatek/clk-mt8195-cam.c
@@ -135,11 +135,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_cam);
static struct platform_driver clk_mt8195_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-cam",
.of_match_table = of_match_clk_mt8195_cam,
},
};
module_platform_driver(clk_mt8195_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c
index 24dab128507a..22cd1cb070f1 100644
--- a/drivers/clk/mediatek/clk-mt8195-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8195-ccu.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ccu);
static struct platform_driver clk_mt8195_ccu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-ccu",
.of_match_table = of_match_clk_mt8195_ccu,
},
};
module_platform_driver(clk_mt8195_ccu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Camera Control Unit clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c
index c7dc3e9d133d..11beba4b2ac2 100644
--- a/drivers/clk/mediatek/clk-mt8195-img.c
+++ b/drivers/clk/mediatek/clk-mt8195-img.c
@@ -89,11 +89,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_img);
static struct platform_driver clk_mt8195_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-img",
.of_match_table = of_match_clk_mt8195_img,
},
};
module_platform_driver(clk_mt8195_img_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 imgsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
index 94912d45509e..8711b18b1576 100644
--- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -59,11 +59,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_imp_iic_wrap);
static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-imp_iic_wrap",
.of_match_table = of_match_clk_mt8195_imp_iic_wrap,
},
};
module_platform_driver(clk_mt8195_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 I2C Wrapper clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index dfba6eb61ccf..ad47fdb23460 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
/* INFRA_AO1 */
- GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
@@ -233,11 +233,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_infra_ao);
static struct platform_driver clk_mt8195_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-infra_ao",
.of_match_table = of_match_clk_mt8195_infra_ao,
},
};
module_platform_driver(clk_mt8195_infra_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 infracfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c
index 21e76e5ad376..b1af00348a86 100644
--- a/drivers/clk/mediatek/clk-mt8195-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8195-ipe.c
@@ -44,11 +44,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ipe);
static struct platform_driver clk_mt8195_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-ipe",
.of_match_table = of_match_clk_mt8195_ipe,
},
};
module_platform_driver(clk_mt8195_ipe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Image Processing Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
index 4951574abf2a..07c358db1af9 100644
--- a/drivers/clk/mediatek/clk-mt8195-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -42,11 +42,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_mfg);
static struct platform_driver clk_mt8195_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-mfg",
.of_match_table = of_match_clk_mt8195_mfg,
},
};
module_platform_driver(clk_mt8195_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
index 39069aaf6bcd..b743eb60a30b 100644
--- a/drivers/clk/mediatek/clk-mt8195-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
@@ -55,11 +55,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_peri_ao);
static struct platform_driver clk_mt8195_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-peri_ao",
.of_match_table = of_match_clk_mt8195_peri_ao,
},
};
module_platform_driver(clk_mt8195_peri_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 pericfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
index 2b94d75be295..bc73fccd0515 100644
--- a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
@@ -40,11 +40,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_scp_adsp);
static struct platform_driver clk_mt8195_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-scp_adsp",
.of_match_table = of_match_clk_mt8195_scp_adsp,
},
};
module_platform_driver(clk_mt8195_scp_adsp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 SCP AudioDSP clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index 8f713a3341a9..b1f44b873354 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -1354,11 +1354,13 @@ static void clk_mt8195_topck_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8195_topck_drv = {
.probe = clk_mt8195_topck_probe,
- .remove_new = clk_mt8195_topck_remove,
+ .remove = clk_mt8195_topck_remove,
.driver = {
.name = "clk-mt8195-topck",
.of_match_table = of_match_clk_mt8195_topck,
},
};
module_platform_driver(clk_mt8195_topck_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 top clock generators driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c
index d266a6d3b603..0bad706047c9 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdec.c
@@ -97,11 +97,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_vdec);
static struct platform_driver clk_mt8195_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-vdec",
.of_match_table = of_match_clk_mt8195_vdec,
},
};
module_platform_driver(clk_mt8195_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 34fc318c146c..581d99f8c254 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -106,11 +106,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo0_id_table);
static struct platform_driver clk_mt8195_vdo0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo0",
},
.id_table = clk_mt8195_vdo0_id_table,
};
module_platform_driver(clk_mt8195_vdo0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Video Output 0 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index e400631e1dbe..7f8b1a8967bd 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -133,11 +133,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo1_id_table);
static struct platform_driver clk_mt8195_vdo1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo1",
},
.id_table = clk_mt8195_vdo1_id_table,
};
module_platform_driver(clk_mt8195_vdo1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Video Output 1 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c
index 93093fadfd0d..3b52ff025d5e 100644
--- a/drivers/clk/mediatek/clk-mt8195-venc.c
+++ b/drivers/clk/mediatek/clk-mt8195-venc.c
@@ -62,11 +62,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_venc);
static struct platform_driver clk_mt8195_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-venc",
.of_match_table = of_match_clk_mt8195_venc,
},
};
module_platform_driver(clk_mt8195_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c
index 81725fcb3a72..0e3e1dd7977c 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c
@@ -99,11 +99,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp0_id_table);
static struct platform_driver clk_mt8195_vpp0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp0",
},
.id_table = clk_mt8195_vpp0_id_table,
};
module_platform_driver(clk_mt8195_vpp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Video Processing Pipe 0 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c
index 867fde4e575b..fb7b7aef0bba 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c
@@ -97,11 +97,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp1_id_table);
static struct platform_driver clk_mt8195_vpp1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp1",
},
.id_table = clk_mt8195_vpp1_id_table,
};
module_platform_driver(clk_mt8195_vpp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Video Processing Pipe 1 clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c
index 7324738179a4..315b93bbfcdc 100644
--- a/drivers/clk/mediatek/clk-mt8195-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8195-wpe.c
@@ -136,11 +136,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_wpe);
static struct platform_driver clk_mt8195_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-wpe",
.of_match_table = of_match_clk_mt8195_wpe,
},
};
module_platform_driver(clk_mt8195_wpe_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8195 Warp Engine clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-apmixedsys.c b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
new file mode 100644
index 000000000000..617f5449b88b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+/* APMIXEDSYS PLL control register offsets */
+#define MAINPLL_CON0 0x250
+#define MAINPLL_CON1 0x254
+#define UNIVPLL_CON0 0x264
+#define UNIVPLL_CON1 0x268
+#define MSDCPLL_CON0 0x278
+#define MSDCPLL_CON1 0x27c
+#define ADSPPLL_CON0 0x28c
+#define ADSPPLL_CON1 0x290
+#define EMIPLL_CON0 0x2a0
+#define EMIPLL_CON1 0x2a4
+#define EMIPLL2_CON0 0x2b4
+#define EMIPLL2_CON1 0x2b8
+#define NET1PLL_CON0 0x2c8
+#define NET1PLL_CON1 0x2cc
+#define SGMIIPLL_CON0 0x2dc
+#define SGMIIPLL_CON1 0x2e0
+
+/* APMIXEDSYS_GP2 PLL control register offsets */
+#define MAINPLL2_CON0 0x250
+#define MAINPLL2_CON1 0x254
+#define UNIVPLL2_CON0 0x264
+#define UNIVPLL2_CON1 0x268
+#define MMPLL2_CON0 0x278
+#define MMPLL2_CON1 0x27c
+#define IMGPLL_CON0 0x28c
+#define IMGPLL_CON1 0x290
+#define TVDPLL1_CON0 0x2a0
+#define TVDPLL1_CON1 0x2a4
+#define TVDPLL2_CON0 0x2b4
+#define TVDPLL2_CON1 0x2b8
+#define TVDPLL3_CON0 0x2c8
+#define TVDPLL3_CON1 0x2cc
+
+#define PLLEN_ALL 0x080
+#define PLLEN_ALL_SET 0x084
+#define PLLEN_ALL_CLR 0x088
+
+#define FENC_STATUS_CON0 0x03c
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = PLLEN_ALL, \
+ .en_set_reg = PLLEN_ALL_SET, \
+ .en_clr_reg = PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+struct mtk_pll_desc {
+ const struct mtk_pll_data *clks;
+ size_t num_clks;
+};
+
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL_FENC(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, FENC_STATUS_CON0,
+ 7, PLL_AO, MAINPLL_CON1, 24, MAINPLL_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, FENC_STATUS_CON0,
+ 6, 0, UNIVPLL_CON1, 24, UNIVPLL_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, FENC_STATUS_CON0,
+ 5, 0, MSDCPLL_CON1, 24, MSDCPLL_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED_ADSPPLL, "adsppll", ADSPPLL_CON0, FENC_STATUS_CON0,
+ 4, 0, ADSPPLL_CON1, 24, ADSPPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED_EMIPLL, "emipll", EMIPLL_CON0, FENC_STATUS_CON0, 3,
+ PLL_AO, EMIPLL_CON1, 24, EMIPLL_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED_EMIPLL2, "emipll2", EMIPLL2_CON0, FENC_STATUS_CON0,
+ 2, PLL_AO, EMIPLL2_CON1, 24, EMIPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED_NET1PLL, "net1pll", NET1PLL_CON0, FENC_STATUS_CON0,
+ 1, 0, NET1PLL_CON1, 24, NET1PLL_CON1, 0, 22, 6),
+ PLL_FENC(CLK_APMIXED_SGMIIPLL, "sgmiipll", SGMIIPLL_CON0, FENC_STATUS_CON0,
+ 0, 0, SGMIIPLL_CON1, 24, SGMIIPLL_CON1, 0, 22, 7),
+};
+
+static const struct mtk_pll_desc apmixed_desc = {
+ .clks = apmixed_plls,
+ .num_clks = ARRAY_SIZE(apmixed_plls),
+};
+
+static const struct mtk_pll_data apmixed2_plls[] = {
+ PLL_FENC(CLK_APMIXED2_MAINPLL2, "mainpll2", MAINPLL2_CON0, FENC_STATUS_CON0,
+ 6, 0, MAINPLL2_CON1, 24, MAINPLL2_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED2_UNIVPLL2, "univpll2", UNIVPLL2_CON0, FENC_STATUS_CON0,
+ 5, 0, UNIVPLL2_CON1, 24, UNIVPLL2_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED2_MMPLL2, "mmpll2", MMPLL2_CON0, FENC_STATUS_CON0,
+ 4, 0, MMPLL2_CON1, 24, MMPLL2_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED2_IMGPLL, "imgpll", IMGPLL_CON0, FENC_STATUS_CON0,
+ 3, 0, IMGPLL_CON1, 24, IMGPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED2_TVDPLL1, "tvdpll1", TVDPLL1_CON0, FENC_STATUS_CON0,
+ 2, 0, TVDPLL1_CON1, 24, TVDPLL1_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED2_TVDPLL2, "tvdpll2", TVDPLL2_CON0, FENC_STATUS_CON0,
+ 1, 0, TVDPLL2_CON1, 24, TVDPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED2_TVDPLL3, "tvdpll3", TVDPLL3_CON0, FENC_STATUS_CON0,
+ 0, 0, TVDPLL3_CON1, 24, TVDPLL3_CON1, 0, 22, 6),
+};
+
+static const struct mtk_pll_desc apmixed2_desc = {
+ .clks = apmixed2_plls,
+ .num_clks = ARRAY_SIZE(apmixed2_plls),
+};
+
+static int clk_mt8196_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct mtk_pll_desc *mcd;
+ int r;
+
+ mcd = device_get_match_data(&pdev->dev);
+ if (!mcd)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, mcd->clks, mcd->num_clks, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static void clk_mt8196_apmixed_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_desc *mcd = device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_apmixed[] = {
+ { .compatible = "mediatek,mt8196-apmixedsys", .data = &apmixed_desc },
+ { .compatible = "mediatek,mt8196-apmixedsys-gp2",
+ .data = &apmixed2_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_apmixed);
+
+static struct platform_driver clk_mt8196_apmixed_drv = {
+ .probe = clk_mt8196_apmixed_probe,
+ .remove = clk_mt8196_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8196-apmixed",
+ .of_match_table = of_match_clk_mt8196_apmixed,
+ },
+};
+module_platform_driver(clk_mt8196_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp0.c b/drivers/clk/mediatek/clk-mt8196-disp0.c
new file mode 100644
index 000000000000..9474aad26e92
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp0.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm0_hwv_regs = {
+ .set_ofs = 0x0020,
+ .clr_ofs = 0x0024,
+ .sta_ofs = 0x2c10,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm1_hwv_regs = {
+ .set_ofs = 0x0028,
+ .clr_ofs = 0x002c,
+ .sta_ofs = 0x2c14,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .hwv_regs = &mm0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .hwv_regs = &mm1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_HWV_MM0(CLK_MM_CONFIG, "mm_config", "disp", 0),
+ GATE_HWV_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "disp", 1),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "disp", 2),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL1, "mm_disp_aal1", "disp", 3),
+ GATE_MM0(CLK_MM_DISP_C3D0, "mm_disp_c3d0", "disp", 4),
+ GATE_MM0(CLK_MM_DISP_C3D1, "mm_disp_c3d1", "disp", 5),
+ GATE_MM0(CLK_MM_DISP_C3D2, "mm_disp_c3d2", "disp", 6),
+ GATE_MM0(CLK_MM_DISP_C3D3, "mm_disp_c3d3", "disp", 7),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "disp", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR1, "mm_disp_ccorr1", "disp", 9),
+ GATE_MM0(CLK_MM_DISP_CCORR2, "mm_disp_ccorr2", "disp", 10),
+ GATE_MM0(CLK_MM_DISP_CCORR3, "mm_disp_ccorr3", "disp", 11),
+ GATE_MM0(CLK_MM_DISP_CHIST0, "mm_disp_chist0", "disp", 12),
+ GATE_MM0(CLK_MM_DISP_CHIST1, "mm_disp_chist1", "disp", 13),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "disp", 14),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "disp", 15),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "disp", 16),
+ GATE_MM0(CLK_MM_DISP_DITHER1, "mm_disp_dither1", "disp", 17),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC0, "mm_disp_dli_async0", "disp", 18),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC1, "mm_disp_dli_async1", "disp", 19),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC2, "mm_disp_dli_async2", "disp", 20),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC3, "mm_disp_dli_async3", "disp", 21),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC4, "mm_disp_dli_async4", "disp", 22),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC5, "mm_disp_dli_async5", "disp", 23),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC6, "mm_disp_dli_async6", "disp", 24),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC7, "mm_disp_dli_async7", "disp", 25),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC8, "mm_disp_dli_async8", "disp", 26),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC9, "mm_disp_dli_async9", "disp", 27),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC10, "mm_disp_dli_async10", "disp", 28),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC11, "mm_disp_dli_async11", "disp", 29),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC12, "mm_disp_dli_async12", "disp", 30),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC13, "mm_disp_dli_async13", "disp", 31),
+ /* MM1 */
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC14, "mm_disp_dli_async14", "disp", 0),
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC15, "mm_disp_dli_async15", "disp", 1),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC0, "mm_disp_dlo_async0", "disp", 2),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC1, "mm_disp_dlo_async1", "disp", 3),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC2, "mm_disp_dlo_async2", "disp", 4),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC3, "mm_disp_dlo_async3", "disp", 5),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC4, "mm_disp_dlo_async4", "disp", 6),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC5, "mm_disp_dlo_async5", "disp", 7),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC6, "mm_disp_dlo_async6", "disp", 8),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC7, "mm_disp_dlo_async7", "disp", 9),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC8, "mm_disp_dlo_async8", "disp", 10),
+ GATE_MM1(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "disp", 11),
+ GATE_MM1(CLK_MM_DISP_GAMMA1, "mm_disp_gamma1", "disp", 12),
+ GATE_MM1(CLK_MM_MDP_AAL0, "mm_mdp_aal0", "disp", 13),
+ GATE_MM1(CLK_MM_MDP_AAL1, "mm_mdp_aal1", "disp", 14),
+ GATE_HWV_MM1(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "disp", 15),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "disp", 16),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK1, "mm_disp_postmask1", "disp", 17),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "disp", 18),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "disp", 19),
+ GATE_HWV_MM1(CLK_MM_DISP_SPR0, "mm_disp_spr0", "disp", 20),
+ GATE_MM1(CLK_MM_DISP_TDSHP0, "mm_disp_tdshp0", "disp", 21),
+ GATE_MM1(CLK_MM_DISP_TDSHP1, "mm_disp_tdshp1", "disp", 22),
+ GATE_HWV_MM1(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "disp", 23),
+ GATE_HWV_MM1(CLK_MM_DISP_Y2R0, "mm_disp_y2r0", "disp", 24),
+ GATE_HWV_MM1(CLK_MM_SMI_SUB_COMM0, "mm_ssc", "disp", 25),
+ GATE_HWV_MM1(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "disp", 26),
+};
+
+static const struct mtk_clk_desc mm_mcd = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp0_id_table[] = {
+ { .name = "clk-mt8196-disp0", .driver_data = (kernel_ulong_t)&mm_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp0_id_table);
+
+static struct platform_driver clk_mt8196_disp0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp0",
+ },
+ .id_table = clk_mt8196_disp0_id_table,
+};
+module_platform_driver(clk_mt8196_disp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp1.c b/drivers/clk/mediatek/clk-mt8196-disp1.c
new file mode 100644
index 000000000000..3bbec79a7010
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp1.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm10_hwv_regs = {
+ .set_ofs = 0x0010,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x2c08,
+};
+
+static const struct mtk_gate_regs mm11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm11_hwv_regs = {
+ .set_ofs = 0x0018,
+ .clr_ofs = 0x001c,
+ .sta_ofs = 0x2c0c,
+};
+
+#define GATE_MM10(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .hwv_regs = &mm10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_MM11(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .hwv_regs = &mm11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+static const struct mtk_gate mm1_clks[] = {
+ /* MM10 */
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_CONFIG, "mm1_dispsys1_config", "disp", 0),
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_S_CONFIG, "mm1_dispsys1_s_config", "disp", 1),
+ GATE_HWV_MM10(CLK_MM1_DISP_MUTEX0, "mm1_disp_mutex0", "disp", 2),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC20, "mm1_disp_dli_async20", "disp", 3),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC21, "mm1_disp_dli_async21", "disp", 4),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC22, "mm1_disp_dli_async22", "disp", 5),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC23, "mm1_disp_dli_async23", "disp", 6),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC24, "mm1_disp_dli_async24", "disp", 7),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC25, "mm1_disp_dli_async25", "disp", 8),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC26, "mm1_disp_dli_async26", "disp", 9),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC27, "mm1_disp_dli_async27", "disp", 10),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC28, "mm1_disp_dli_async28", "disp", 11),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY0, "mm1_disp_relay0", "disp", 12),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY1, "mm1_disp_relay1", "disp", 13),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY2, "mm1_disp_relay2", "disp", 14),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY3, "mm1_disp_relay3", "disp", 15),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF0, "mm1_DP_CLK", "disp", 16),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF1, "mm1_disp_dp_intf1", "disp", 17),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP0, "mm1_disp_dsc_wrap0", "disp", 18),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP1, "mm1_disp_dsc_wrap1", "disp", 19),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP2, "mm1_disp_dsc_wrap2", "disp", 20),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP3, "mm1_disp_dsc_wrap3", "disp", 21),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI0, "mm1_CLK0", "disp", 22),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI1, "mm1_CLK1", "disp", 23),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI2, "mm1_CLK2", "disp", 24),
+ GATE_HWV_MM10(CLK_MM1_DISP_DVO0, "mm1_disp_dvo0", "disp", 25),
+ GATE_HWV_MM10(CLK_MM1_DISP_GDMA0, "mm1_disp_gdma0", "disp", 26),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE0, "mm1_disp_merge0", "disp", 27),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE1, "mm1_disp_merge1", "disp", 28),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE2, "mm1_disp_merge2", "disp", 29),
+ GATE_HWV_MM10(CLK_MM1_DISP_ODDMR0, "mm1_disp_oddmr0", "disp", 30),
+ GATE_HWV_MM10(CLK_MM1_DISP_POSTALIGN0, "mm1_disp_postalign0", "disp", 31),
+ /* MM11 */
+ GATE_HWV_MM11(CLK_MM1_DISP_DITHER2, "mm1_disp_dither2", "disp", 0),
+ GATE_HWV_MM11(CLK_MM1_DISP_R2Y0, "mm1_disp_r2y0", "disp", 1),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER0, "mm1_disp_splitter0", "disp", 2),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER1, "mm1_disp_splitter1", "disp", 3),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER2, "mm1_disp_splitter2", "disp", 4),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER3, "mm1_disp_splitter3", "disp", 5),
+ GATE_HWV_MM11(CLK_MM1_DISP_VDCM0, "mm1_disp_vdcm0", "disp", 6),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA1, "mm1_disp_wdma1", "disp", 7),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA2, "mm1_disp_wdma2", "disp", 8),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA3, "mm1_disp_wdma3", "disp", 9),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA4, "mm1_disp_wdma4", "disp", 10),
+ GATE_HWV_MM11(CLK_MM1_MDP_RDMA1, "mm1_mdp_rdma1", "disp", 11),
+ GATE_HWV_MM11(CLK_MM1_SMI_LARB0, "mm1_smi_larb0", "disp", 12),
+ GATE_HWV_MM11(CLK_MM1_MOD1, "mm1_mod1", "clk26m", 13),
+ GATE_HWV_MM11(CLK_MM1_MOD2, "mm1_mod2", "clk26m", 14),
+ GATE_HWV_MM11(CLK_MM1_MOD3, "mm1_mod3", "clk26m", 15),
+ GATE_HWV_MM11(CLK_MM1_MOD4, "mm1_mod4", "dp0", 16),
+ GATE_HWV_MM11(CLK_MM1_MOD5, "mm1_mod5", "dp1", 17),
+ GATE_HWV_MM11(CLK_MM1_MOD6, "mm1_mod6", "dp1", 18),
+ GATE_HWV_MM11(CLK_MM1_CG0, "mm1_cg0", "disp", 20),
+ GATE_HWV_MM11(CLK_MM1_CG1, "mm1_cg1", "disp", 21),
+ GATE_HWV_MM11(CLK_MM1_CG2, "mm1_cg2", "disp", 22),
+ GATE_HWV_MM11(CLK_MM1_CG3, "mm1_cg3", "disp", 23),
+ GATE_HWV_MM11(CLK_MM1_CG4, "mm1_cg4", "disp", 24),
+ GATE_HWV_MM11(CLK_MM1_CG5, "mm1_cg5", "disp", 25),
+ GATE_HWV_MM11(CLK_MM1_CG6, "mm1_cg6", "disp", 26),
+ GATE_HWV_MM11(CLK_MM1_CG7, "mm1_cg7", "disp", 27),
+ GATE_HWV_MM11(CLK_MM1_F26M, "mm1_f26m_ck", "clk26m", 28),
+};
+
+static const struct mtk_clk_desc mm1_mcd = {
+ .clks = mm1_clks,
+ .num_clks = ARRAY_SIZE(mm1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp1_id_table[] = {
+ { .name = "clk-mt8196-disp1", .driver_data = (kernel_ulong_t)&mm1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp1_id_table);
+
+static struct platform_driver clk_mt8196_disp1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp1",
+ },
+ .id_table = clk_mt8196_disp1_id_table,
+};
+module_platform_driver(clk_mt8196_disp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
new file mode 100644
index 000000000000..a63241671650
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate impc_clks[] = {
+ GATE_IMP(CLK_IMPC_I2C11, "impc_i2c11", "i2c_p", 0),
+ GATE_IMP(CLK_IMPC_I2C12, "impc_i2c12", "i2c_p", 1),
+ GATE_IMP(CLK_IMPC_I2C13, "impc_i2c13", "i2c_p", 2),
+ GATE_IMP(CLK_IMPC_I2C14, "impc_i2c14", "i2c_p", 3),
+};
+
+static const struct mtk_clk_desc impc_mcd = {
+ .clks = impc_clks,
+ .num_clks = ARRAY_SIZE(impc_clks),
+};
+
+static const struct mtk_gate impe_clks[] = {
+ GATE_IMP(CLK_IMPE_I2C5, "impe_i2c5", "i2c_east", 0),
+};
+
+static const struct mtk_clk_desc impe_mcd = {
+ .clks = impe_clks,
+ .num_clks = ARRAY_SIZE(impe_clks),
+};
+
+static const struct mtk_gate_regs impn_hwv_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x2c00,
+};
+
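+/*
+ * Hardware-voter (HWV) gates: enable/disable requests go through the
+ * voter's set/clear registers so that multiple agents can share the
+ * gate; sta_ofs reports when the vote has been applied.
+ */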
+#define GATE_HWV_IMPN(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .hwv_regs = &impn_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate impn_clks[] = {
+ GATE_IMP(CLK_IMPN_I2C1, "impn_i2c1", "i2c_north", 0),
+ GATE_IMP(CLK_IMPN_I2C2, "impn_i2c2", "i2c_north", 1),
+ GATE_IMP(CLK_IMPN_I2C4, "impn_i2c4", "i2c_north", 2),
+ GATE_HWV_IMPN(CLK_IMPN_I2C7, "impn_i2c7", "i2c_north", 3),
+ GATE_IMP(CLK_IMPN_I2C8, "impn_i2c8", "i2c_north", 4),
+ GATE_IMP(CLK_IMPN_I2C9, "impn_i2c9", "i2c_north", 5),
+};
+
+static const struct mtk_clk_desc impn_mcd = {
+ .clks = impn_clks,
+ .num_clks = ARRAY_SIZE(impn_clks),
+};
+
+static const struct mtk_gate impw_clks[] = {
+ GATE_IMP(CLK_IMPW_I2C0, "impw_i2c0", "i2c_west", 0),
+ GATE_IMP(CLK_IMPW_I2C3, "impw_i2c3", "i2c_west", 1),
+ GATE_IMP(CLK_IMPW_I2C6, "impw_i2c6", "i2c_west", 2),
+ GATE_IMP(CLK_IMPW_I2C10, "impw_i2c10", "i2c_west", 3),
+};
+
+static const struct mtk_clk_desc impw_mcd = {
+ .clks = impw_clks,
+ .num_clks = ARRAY_SIZE(impw_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_imp_iic_wrap[] = {
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-c", .data = &impc_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-e", .data = &impe_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-n", .data = &impn_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-w", .data = &impw_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_imp_iic_wrap);
+
+static struct platform_driver clk_mt8196_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8196_imp_iic_wrap,
+ },
+};
+module_platform_driver(clk_mt8196_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 I2C Wrapper clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mcu.c b/drivers/clk/mediatek/clk-mt8196-mcu.c
new file mode 100644
index 000000000000..5cbcc411ae73
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mcu.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define ARMPLL_LL_CON0 0x008
+#define ARMPLL_LL_CON1 0x00c
+#define ARMPLL_LL_CON2 0x010
+#define ARMPLL_LL_CON3 0x014
+#define ARMPLL_BL_CON0 0x008
+#define ARMPLL_BL_CON1 0x00c
+#define ARMPLL_BL_CON2 0x010
+#define ARMPLL_BL_CON3 0x014
+#define ARMPLL_B_CON0 0x008
+#define ARMPLL_B_CON1 0x00c
+#define ARMPLL_B_CON2 0x010
+#define ARMPLL_B_CON3 0x014
+#define CCIPLL_CON0 0x008
+#define CCIPLL_CON1 0x00c
+#define CCIPLL_CON2 0x010
+#define CCIPLL_CON3 0x014
+#define PTPPLL_CON0 0x008
+#define PTPPLL_CON1 0x00c
+#define PTPPLL_CON2 0x010
+#define PTPPLL_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
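+/*
+ * The PLLs below use a 22-bit PCW with an 8-bit integer part, i.e. a
+ * 14-bit fractional divider against the 26 MHz reference.
+ */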
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data cpu_bl_plls[] = {
+ PLL(CLK_CPBL_ARMPLL_BL, "armpll-bl", ARMPLL_BL_CON0, ARMPLL_BL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_BL_CON1, 24, 0, 0, 0, ARMPLL_BL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_b_plls[] = {
+ PLL(CLK_CPB_ARMPLL_B, "armpll-b", ARMPLL_B_CON0, ARMPLL_B_CON0, 0, 0,
+ PLL_AO, BIT(0), ARMPLL_B_CON1, 24, 0, 0, 0, ARMPLL_B_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_ll_plls[] = {
+ PLL(CLK_CPLL_ARMPLL_LL, "armpll-ll", ARMPLL_LL_CON0, ARMPLL_LL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_LL_CON1, 24, 0, 0, 0, ARMPLL_LL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cci_plls[] = {
+ PLL(CLK_CCIPLL, "ccipll", CCIPLL_CON0, CCIPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), CCIPLL_CON1, 24, 0, 0, 0, CCIPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data ptp_plls[] = {
+ PLL(CLK_PTPPLL, "ptppll", PTPPLL_CON0, PTPPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), PTPPLL_CON1, 24, 0, 0, 0, PTPPLL_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mcu[] = {
+ { .compatible = "mediatek,mt8196-armpll-bl-pll-ctrl",
+ .data = &cpu_bl_plls },
+ { .compatible = "mediatek,mt8196-armpll-b-pll-ctrl",
+ .data = &cpu_b_plls },
+ { .compatible = "mediatek,mt8196-armpll-ll-pll-ctrl",
+ .data = &cpu_ll_plls },
+ { .compatible = "mediatek,mt8196-ccipll-pll-ctrl", .data = &cci_plls },
+ { .compatible = "mediatek,mt8196-ptppll-pll-ctrl", .data = &ptp_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mcu);
+
+static int clk_mt8196_mcu_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
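+	/* Each matched *-pll-ctrl block exposes exactly one PLL */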
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mcu_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mcu_drv = {
+ .probe = clk_mt8196_mcu_probe,
+ .remove = clk_mt8196_mcu_remove,
+ .driver = {
+ .name = "clk-mt8196-mcu",
+ .of_match_table = of_match_clk_mt8196_mcu,
+ },
+};
+module_platform_driver(clk_mt8196_mcu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 mcusys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mdpsys.c b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
new file mode 100644
index 000000000000..7667d88f0eb0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mdp2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_MDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp2_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mdp1_clks[] = {
+ /* MDP1-0 */
+ GATE_MDP0(CLK_MDP1_MDP_MUTEX0, "mdp1_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP1_SMI0, "mdp1_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP1_APB_BUS, "mdp1_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA0, "mdp1_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA1, "mdp1_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA2, "mdp1_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP1_MDP_BIRSZ0, "mdp1_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP1_MDP_HDR0, "mdp1_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP1_MDP_AAL0, "mdp1_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ0, "mdp1_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ2, "mdp1_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP1_MDP_TDSHP0, "mdp1_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP1_MDP_COLOR0, "mdp1_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP1_MDP_WROT0, "mdp1_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP1_MDP_WROT1, "mdp1_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP1_MDP_WROT2, "mdp1_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP1_MDP_FAKE_ENG0, "mdp1_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP1_APB_DB, "mdp1_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC0, "mdp1_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC1, "mdp1_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC0, "mdp1_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC1, "mdp1_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC2, "mdp1_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC2, "mdp1_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC3, "mdp1_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP1_IMG_DL_ASYNC0, "mdp1_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP1_MDP_RROT0, "mdp1_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP1_MDP_MERGE0, "mdp1_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP1_MDP_C3D0, "mdp1_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP1_MDP_FG0, "mdp1_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP1_MDP_CLA2, "mdp1_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC4, "mdp1_mdp_dlo_async4", "mdp", 31),
+ /* MDP1-1 */
+ GATE_MDP1(CLK_MDP1_VPP_RSZ0, "mdp1_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP1_VPP_RSZ1, "mdp1_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP1_MDP_DLO_ASYNC5, "mdp1_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP1_IMG0, "mdp1_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP1_F26M, "mdp1_f26m", "clk26m", 27),
+ /* MDP1-2 */
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY0, "mdp1_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY1, "mdp1_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp1_mcd = {
+ .clks = mdp1_clks,
+ .num_clks = ARRAY_SIZE(mdp1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_MDP_MUTEX0, "mdp_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP_MDP_RDMA0, "mdp_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP_MDP_RDMA1, "mdp_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP_MDP_RDMA2, "mdp_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP_MDP_BIRSZ0, "mdp_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP_MDP_HDR0, "mdp_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP_MDP_AAL0, "mdp_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP_MDP_RSZ0, "mdp_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP_MDP_RSZ2, "mdp_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP_MDP_TDSHP0, "mdp_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP_MDP_COLOR0, "mdp_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP_MDP_WROT0, "mdp_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP_MDP_WROT1, "mdp_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP_MDP_WROT2, "mdp_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP_MDP_FAKE_ENG0, "mdp_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP_APB_DB, "mdp_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC0, "mdp_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC1, "mdp_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC0, "mdp_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC1, "mdp_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC2, "mdp_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC2, "mdp_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC3, "mdp_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP_MDP_RROT0, "mdp_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP_MDP_MERGE0, "mdp_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP_MDP_C3D0, "mdp_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP_MDP_FG0, "mdp_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP_MDP_CLA2, "mdp_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC4, "mdp_mdp_dlo_async4", "mdp", 31),
+ /* MDP1 */
+ GATE_MDP1(CLK_MDP_VPP_RSZ0, "mdp_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP_VPP_RSZ1, "mdp_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP_MDP_DLO_ASYNC5, "mdp_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP_IMG0, "mdp_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP_F26M, "mdp_f26m", "clk26m", 27),
+ /* MDP2 */
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY0, "mdp_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY1, "mdp_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp_mcd = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_mdpsys[] = {
+ { .compatible = "mediatek,mt8196-mdpsys0", .data = &mdp_mcd },
+ { .compatible = "mediatek,mt8196-mdpsys1", .data = &mdp1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mdpsys);
+
+static struct platform_driver clk_mt8196_mdpsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-mdpsys",
+ .of_match_table = of_match_clk_mt8196_mdpsys,
+ },
+};
+module_platform_driver(clk_mt8196_mdpsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Multimedia Data Path clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mfg.c b/drivers/clk/mediatek/clk-mt8196-mfg.c
new file mode 100644
index 000000000000..ae1eb9de79ae
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mfg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MFGPLL_CON0 0x008
+#define MFGPLL_CON1 0x00c
+#define MFGPLL_CON2 0x010
+#define MFGPLL_CON3 0x014
+#define MFGPLL_SC0_CON0 0x008
+#define MFGPLL_SC0_CON1 0x00c
+#define MFGPLL_SC0_CON2 0x010
+#define MFGPLL_SC0_CON3 0x014
+#define MFGPLL_SC1_CON0 0x008
+#define MFGPLL_SC1_CON1 0x00c
+#define MFGPLL_SC1_CON2 0x010
+#define MFGPLL_SC1_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
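+/* Separate PLL control blocks: MFGPLL plus the SC0/SC1 instances */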
+static const struct mtk_pll_data mfg_ao_plls[] = {
+ PLL(CLK_MFG_AO_MFGPLL, "mfgpll", MFGPLL_CON0, MFGPLL_CON0, 0, 0, 0,
+ BIT(0), MFGPLL_CON1, 24, 0, 0, 0,
+ MFGPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc0_ao_plls[] = {
+ PLL(CLK_MFGSC0_AO_MFGPLL_SC0, "mfgpll-sc0", MFGPLL_SC0_CON0,
+ MFGPLL_SC0_CON0, 0, 0, 0, BIT(0), MFGPLL_SC0_CON1, 24, 0, 0, 0,
+ MFGPLL_SC0_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc1_ao_plls[] = {
+ PLL(CLK_MFGSC1_AO_MFGPLL_SC1, "mfgpll-sc1", MFGPLL_SC1_CON0,
+ MFGPLL_SC1_CON0, 0, 0, 0, BIT(0), MFGPLL_SC1_CON1, 24, 0, 0, 0,
+ MFGPLL_SC1_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mfg[] = {
+ { .compatible = "mediatek,mt8196-mfgpll-pll-ctrl",
+ .data = &mfg_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc0-pll-ctrl",
+ .data = &mfgsc0_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc1-pll-ctrl",
+ .data = &mfgsc1_ao_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mfg);
+
+static int clk_mt8196_mfg_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mfg_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mfg_drv = {
+ .probe = clk_mt8196_mfg_probe,
+ .remove = clk_mt8196_mfg_remove,
+ .driver = {
+ .name = "clk-mt8196-mfg",
+ .of_match_table = of_match_clk_mt8196_mfg,
+ },
+};
+module_platform_driver(clk_mt8196_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 GPU mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl0.c b/drivers/clk/mediatek/clk-mt8196-ovl0.c
new file mode 100644
index 000000000000..d4affd14d2c4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl0.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl0_hwv_regs = {
+ .set_ofs = 0x0060,
+ .clr_ofs = 0x0064,
+ .sta_ofs = 0x2c30,
+};
+
+static const struct mtk_gate_regs ovl1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl1_hwv_regs = {
+ .set_ofs = 0x0068,
+ .clr_ofs = 0x006c,
+ .sta_ofs = 0x2c34,
+};
+
+#define GATE_HWV_OVL0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl0_cg_regs, \
+ .hwv_regs = &ovl0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl1_cg_regs, \
+ .hwv_regs = &ovl1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl_clks[] = {
+ /* OVL0 */
+ GATE_HWV_OVL0(CLK_OVLSYS_CONFIG, "ovlsys_config", "disp", 0),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG0, "ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG1, "ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL0(CLK_OVL_MUTEX0, "ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA0, "ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA1, "ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA2, "ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA3, "ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA4, "ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA5, "ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA6, "ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA7, "ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA8, "ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA9, "ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER0, "ovl_blender0", "disp", 14),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER1, "ovl_blender1", "disp", 15),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER2, "ovl_blender2", "disp", 16),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER3, "ovl_blender3", "disp", 17),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER4, "ovl_blender4", "disp", 18),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER5, "ovl_blender5", "disp", 19),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER6, "ovl_blender6", "disp", 20),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER7, "ovl_blender7", "disp", 21),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER8, "ovl_blender8", "disp", 22),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER9, "ovl_blender9", "disp", 23),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC0, "ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC1, "ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC2, "ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC3, "ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC4, "ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC5, "ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ0, "ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ1, "ovl_mdp_rsz1", "disp", 31),
+ /* OVL1 */
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA0, "ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA1, "ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL1(CLK_OVL_UFBC_WDMA0, "ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA0, "ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA1, "ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL1(CLK_OVL_BWM0, "ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL1(CLK_OVL_DLI0, "ovl_dli0", "disp", 6),
+ GATE_HWV_OVL1(CLK_OVL_DLI1, "ovl_dli1", "disp", 7),
+ GATE_HWV_OVL1(CLK_OVL_DLI2, "ovl_dli2", "disp", 8),
+ GATE_HWV_OVL1(CLK_OVL_DLI3, "ovl_dli3", "disp", 9),
+ GATE_HWV_OVL1(CLK_OVL_DLI4, "ovl_dli4", "disp", 10),
+ GATE_HWV_OVL1(CLK_OVL_DLI5, "ovl_dli5", "disp", 11),
+ GATE_HWV_OVL1(CLK_OVL_DLI6, "ovl_dli6", "disp", 12),
+ GATE_HWV_OVL1(CLK_OVL_DLI7, "ovl_dli7", "disp", 13),
+ GATE_HWV_OVL1(CLK_OVL_DLI8, "ovl_dli8", "disp", 14),
+ GATE_HWV_OVL1(CLK_OVL_DLO0, "ovl_dlo0", "disp", 15),
+ GATE_HWV_OVL1(CLK_OVL_DLO1, "ovl_dlo1", "disp", 16),
+ GATE_HWV_OVL1(CLK_OVL_DLO2, "ovl_dlo2", "disp", 17),
+ GATE_HWV_OVL1(CLK_OVL_DLO3, "ovl_dlo3", "disp", 18),
+ GATE_HWV_OVL1(CLK_OVL_DLO4, "ovl_dlo4", "disp", 19),
+ GATE_HWV_OVL1(CLK_OVL_DLO5, "ovl_dlo5", "disp", 20),
+ GATE_HWV_OVL1(CLK_OVL_DLO6, "ovl_dlo6", "disp", 21),
+ GATE_HWV_OVL1(CLK_OVL_DLO7, "ovl_dlo7", "disp", 22),
+ GATE_HWV_OVL1(CLK_OVL_DLO8, "ovl_dlo8", "disp", 23),
+ GATE_HWV_OVL1(CLK_OVL_DLO9, "ovl_dlo9", "disp", 24),
+ GATE_HWV_OVL1(CLK_OVL_DLO10, "ovl_dlo10", "disp", 25),
+ GATE_HWV_OVL1(CLK_OVL_DLO11, "ovl_dlo11", "disp", 26),
+ GATE_HWV_OVL1(CLK_OVL_DLO12, "ovl_dlo12", "disp", 27),
+ GATE_HWV_OVL1(CLK_OVLSYS_RELAY0, "ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL1(CLK_OVL_INLINEROT0, "ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL1(CLK_OVL_SMI, "ovl_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl_mcd = {
+ .clks = ovl_clks,
+ .num_clks = ARRAY_SIZE(ovl_clks),
+};
+
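+/*
+ * No OF match table: this clock provider is expected to be instantiated
+ * as a platform device by its parent driver.
+ */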
+static const struct platform_device_id clk_mt8196_ovl0_id_table[] = {
+ { .name = "clk-mt8196-ovl0", .driver_data = (kernel_ulong_t)&ovl_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl0_id_table);
+
+static struct platform_driver clk_mt8196_ovl0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl0",
+ },
+ .id_table = clk_mt8196_ovl0_id_table,
+};
+module_platform_driver(clk_mt8196_ovl0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl1.c b/drivers/clk/mediatek/clk-mt8196-ovl1.c
new file mode 100644
index 000000000000..c8843d0d3ede
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl1.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl10_hwv_regs = {
+ .set_ofs = 0x0050,
+ .clr_ofs = 0x0054,
+ .sta_ofs = 0x2c28,
+};
+
+static const struct mtk_gate_regs ovl11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl11_hwv_regs = {
+ .set_ofs = 0x0058,
+ .clr_ofs = 0x005c,
+ .sta_ofs = 0x2c2c,
+};
+
+#define GATE_HWV_OVL10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl10_cg_regs, \
+ .hwv_regs = &ovl10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl11_cg_regs, \
+ .hwv_regs = &ovl11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl1_clks[] = {
+ /* OVL10 */
+ GATE_HWV_OVL10(CLK_OVL1_OVLSYS_CONFIG, "ovl1_ovlsys_config", "disp", 0),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG0, "ovl1_ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG1, "ovl1_ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MUTEX0, "ovl1_ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA0, "ovl1_ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA1, "ovl1_ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA2, "ovl1_ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA3, "ovl1_ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA4, "ovl1_ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA5, "ovl1_ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA6, "ovl1_ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA7, "ovl1_ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA8, "ovl1_ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA9, "ovl1_ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER0, "ovl1_ovl_blender0", "disp", 14),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER1, "ovl1_ovl_blender1", "disp", 15),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER2, "ovl1_ovl_blender2", "disp", 16),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER3, "ovl1_ovl_blender3", "disp", 17),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER4, "ovl1_ovl_blender4", "disp", 18),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER5, "ovl1_ovl_blender5", "disp", 19),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER6, "ovl1_ovl_blender6", "disp", 20),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER7, "ovl1_ovl_blender7", "disp", 21),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER8, "ovl1_ovl_blender8", "disp", 22),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER9, "ovl1_ovl_blender9", "disp", 23),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC0, "ovl1_ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC1, "ovl1_ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC2, "ovl1_ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC3, "ovl1_ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC4, "ovl1_ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC5, "ovl1_ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ0, "ovl1_ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ1, "ovl1_ovl_mdp_rsz1", "disp", 31),
+ /* OVL11 */
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA0, "ovl1_ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA1, "ovl1_ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_UFBC_WDMA0, "ovl1_ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA0, "ovl1_ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA1, "ovl1_ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_BWM0, "ovl1_ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL11(CLK_OVL1_DLI0, "ovl1_dli0", "disp", 6),
+ GATE_HWV_OVL11(CLK_OVL1_DLI1, "ovl1_dli1", "disp", 7),
+ GATE_HWV_OVL11(CLK_OVL1_DLI2, "ovl1_dli2", "disp", 8),
+ GATE_HWV_OVL11(CLK_OVL1_DLI3, "ovl1_dli3", "disp", 9),
+ GATE_HWV_OVL11(CLK_OVL1_DLI4, "ovl1_dli4", "disp", 10),
+ GATE_HWV_OVL11(CLK_OVL1_DLI5, "ovl1_dli5", "disp", 11),
+ GATE_HWV_OVL11(CLK_OVL1_DLI6, "ovl1_dli6", "disp", 12),
+ GATE_HWV_OVL11(CLK_OVL1_DLI7, "ovl1_dli7", "disp", 13),
+ GATE_HWV_OVL11(CLK_OVL1_DLI8, "ovl1_dli8", "disp", 14),
+ GATE_HWV_OVL11(CLK_OVL1_DLO0, "ovl1_dlo0", "disp", 15),
+ GATE_HWV_OVL11(CLK_OVL1_DLO1, "ovl1_dlo1", "disp", 16),
+ GATE_HWV_OVL11(CLK_OVL1_DLO2, "ovl1_dlo2", "disp", 17),
+ GATE_HWV_OVL11(CLK_OVL1_DLO3, "ovl1_dlo3", "disp", 18),
+ GATE_HWV_OVL11(CLK_OVL1_DLO4, "ovl1_dlo4", "disp", 19),
+ GATE_HWV_OVL11(CLK_OVL1_DLO5, "ovl1_dlo5", "disp", 20),
+ GATE_HWV_OVL11(CLK_OVL1_DLO6, "ovl1_dlo6", "disp", 21),
+ GATE_HWV_OVL11(CLK_OVL1_DLO7, "ovl1_dlo7", "disp", 22),
+ GATE_HWV_OVL11(CLK_OVL1_DLO8, "ovl1_dlo8", "disp", 23),
+ GATE_HWV_OVL11(CLK_OVL1_DLO9, "ovl1_dlo9", "disp", 24),
+ GATE_HWV_OVL11(CLK_OVL1_DLO10, "ovl1_dlo10", "disp", 25),
+ GATE_HWV_OVL11(CLK_OVL1_DLO11, "ovl1_dlo11", "disp", 26),
+ GATE_HWV_OVL11(CLK_OVL1_DLO12, "ovl1_dlo12", "disp", 27),
+ GATE_HWV_OVL11(CLK_OVL1_OVLSYS_RELAY0, "ovl1_ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_INLINEROT0, "ovl1_ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL11(CLK_OVL1_SMI, "ovl1_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl1_mcd = {
+ .clks = ovl1_clks,
+ .num_clks = ARRAY_SIZE(ovl1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl1_id_table[] = {
+ { .name = "clk-mt8196-ovl1", .driver_data = (kernel_ulong_t)&ovl1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl1_id_table);
+
+static struct platform_driver clk_mt8196_ovl1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl1",
+ },
+ .id_table = clk_mt8196_ovl1_id_table,
+};
+module_platform_driver(clk_mt8196_ovl1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-peri_ao.c b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
new file mode 100644
index 000000000000..f227a86c5d60
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs peri_ao0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs peri_ao1_cg_regs = {
+ .set_ofs = 0x2c,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs peri_ao1_hwv_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x2c04,
+};
+
+static const struct mtk_gate_regs peri_ao2_cg_regs = {
+ .set_ofs = 0x34,
+ .clr_ofs = 0x38,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI_AO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
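+/* PERI_AO1 gates driven through the hardware voter (see peri_ao1_hwv_regs) */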
+#define GATE_HWV_PERI_AO1(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .hwv_regs = &peri_ao1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+#define GATE_PERI_AO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate peri_ao_clks[] = {
+ /* PERI_AO0 */
+ GATE_PERI_AO0(CLK_PERI_AO_UART0_BCLK, "peri_ao_uart0_bclk", "uart", 0),
+ GATE_PERI_AO0(CLK_PERI_AO_UART1_BCLK, "peri_ao_uart1_bclk", "uart", 1),
+ GATE_PERI_AO0(CLK_PERI_AO_UART2_BCLK, "peri_ao_uart2_bclk", "uart", 2),
+ GATE_PERI_AO0(CLK_PERI_AO_UART3_BCLK, "peri_ao_uart3_bclk", "uart", 3),
+ GATE_PERI_AO0(CLK_PERI_AO_UART4_BCLK, "peri_ao_uart4_bclk", "uart", 4),
+ GATE_PERI_AO0(CLK_PERI_AO_UART5_BCLK, "peri_ao_uart5_bclk", "uart", 5),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_HCLK, "peri_ao_pwm_x16w", "p_axi", 12),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_BCLK, "peri_ao_pwm_x16w_bclk", "pwm", 13),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK0, "peri_ao_pwm_pwm_bclk0", "pwm", 14),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK1, "peri_ao_pwm_pwm_bclk1", "pwm", 15),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK2, "peri_ao_pwm_pwm_bclk2", "pwm", 16),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK3, "peri_ao_pwm_pwm_bclk3", "pwm", 17),
+ /* PERI_AO1 */
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI0_BCLK, "peri_ao_spi0_bclk", "spi0_b", 0),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI1_BCLK, "peri_ao_spi1_bclk", "spi1_b", 2),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI2_BCLK, "peri_ao_spi2_bclk", "spi2_b", 3),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI3_BCLK, "peri_ao_spi3_bclk", "spi3_b", 4),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI4_BCLK, "peri_ao_spi4_bclk", "spi4_b", 5),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI5_BCLK, "peri_ao_spi5_bclk", "spi5_b", 6),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI6_BCLK, "peri_ao_spi6_bclk", "spi6_b", 7),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI7_BCLK, "peri_ao_spi7_bclk", "spi7_b", 8),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "peri_ao_flashif_27m",
+ 18),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_27M, "peri_ao_flashif_27m", "sflash", 19),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_DRAM, "peri_ao_flashif_dram", "p_axi", 20),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_AXI, "peri_ao_flashif_axi", "peri_ao_flashif_dram", 21),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_BCLK, "peri_ao_flashif_bclk", "p_axi", 22),
+ GATE_PERI_AO1(CLK_PERI_AO_AP_DMA_X32W_BCLK, "peri_ao_ap_dma_x32w_bclk", "p_axi", 26),
+ /* PERI_AO2 */
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_MSDC_SRC, "peri_ao_msdc1_msdc_src", "msdc30_1", 1),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK, "peri_ao_msdc1", "peri_ao_msdc1_axi", 2),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_AXI, "peri_ao_msdc1_axi", "p_axi", 3),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK_WRAP, "peri_ao_msdc1_h_wrap", "peri_ao_msdc1", 4),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_MSDC_SRC, "peri_ao_msdc2_msdc_src", "msdc30_2", 10),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK, "peri_ao_msdc2", "peri_ao_msdc2_axi", 11),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_AXI, "peri_ao_msdc2_axi", "p_axi", 12),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK_WRAP, "peri_ao_msdc2_h_wrap", "peri_ao_msdc2", 13),
+};
+
+static const struct mtk_clk_desc peri_ao_mcd = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_peri_ao[] = {
+ { .compatible = "mediatek,mt8196-pericfg-ao", .data = &peri_ao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_peri_ao);
+
+static struct platform_driver clk_mt8196_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-peri-ao",
+ .of_match_table = of_match_clk_mt8196_peri_ao,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 pericfg_ao clock controller driver");
+module_platform_driver(clk_mt8196_peri_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-pextp.c b/drivers/clk/mediatek/clk-mt8196-pextp.c
new file mode 100644
index 000000000000..3e505ecc4b6e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-pextp.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define MT8196_PEXTP_RST0_SET_OFFSET 0x8
+
+static const struct mtk_gate_regs pext_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_PEXT(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pext_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
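+/*
+ * Parent names reference topckgen muxes; "ufs_pexpt0_mem_sub" (sic)
+ * matches the spelling of the mux clock defined in clk-mt8196-topckgen.c.
+ */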
+static const struct mtk_gate pext_clks[] = {
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_TL, "pext_pm0_tl", "tl", 0),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_REF, "pext_pm0_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_MCU_BUS, "pext_pp0_mcu_bus", "clk26m", 6),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF, "pext_pp0_pextp_ref", "clk26m", 7),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AXI_250, "pext_pm0_axi_250", "ufs_pexpt0_mem_sub", 12),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AHB_APB, "pext_pm0_ahb_apb", "ufs_pextp0_axi", 13),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_PL_P, "pext_pm0_pl_p", "clk26m", 14),
+ GATE_PEXT(CLK_PEXT_PEXTP_VLP_AO_P0_LP, "pext_pextp_vlp_ao_p0_lp", "clk26m", 19),
+};
+
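+/* Map binding reset IDs onto bit positions within the RST0 set/clear bank */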
+static u16 pext_rst_ofs[] = { MT8196_PEXTP_RST0_SET_OFFSET };
+
+static u16 pext_rst_idx_map[] = {
+ [MT8196_PEXTP0_RST0_PCIE0_MAC] = 0,
+ [MT8196_PEXTP0_RST0_PCIE0_PHY] = 1,
+};
+
+static const struct mtk_clk_rst_desc pext_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext_mcd = {
+ .clks = pext_clks,
+ .num_clks = ARRAY_SIZE(pext_clks),
+ .rst_desc = &pext_rst_desc,
+};
+
+static const struct mtk_gate pext1_clks[] = {
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_TL, "pext1_pm1_tl", "tl_p1", 0),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_REF, "pext1_pm1_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_TL, "pext1_pm2_tl", "tl_p2", 2),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_REF, "pext1_pm2_ref", "clk26m", 3),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS, "pext1_pp1_mcu_bus", "clk26m", 8),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF, "pext1_pp1_pextp_ref", "clk26m", 9),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS, "pext1_pp2_mcu_bus", "clk26m", 10),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF, "pext1_pp2_pextp_ref", "clk26m", 11),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AXI_250, "pext1_pm1_axi_250",
+ "pextp1_usb_axi", 16),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AHB_APB, "pext1_pm1_ahb_apb",
+ "pextp1_usb_mem_sub", 17),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_PL_P, "pext1_pm1_pl_p", "clk26m", 18),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AXI_250, "pext1_pm2_axi_250",
+ "pextp1_usb_axi", 19),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AHB_APB, "pext1_pm2_ahb_apb",
+ "pextp1_usb_mem_sub", 20),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_PL_P, "pext1_pm2_pl_p", "clk26m", 21),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P1_LP, "pext1_pextp_vlp_ao_p1_lp", "clk26m", 26),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P2_LP, "pext1_pextp_vlp_ao_p2_lp", "clk26m", 27),
+};
+
+static u16 pext1_rst_idx_map[] = {
+ [MT8196_PEXTP1_RST0_PCIE1_MAC] = 0,
+ [MT8196_PEXTP1_RST0_PCIE1_PHY] = 1,
+ [MT8196_PEXTP1_RST0_PCIE2_MAC] = 8,
+ [MT8196_PEXTP1_RST0_PCIE2_PHY] = 9,
+};
+
+static const struct mtk_clk_rst_desc pext1_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext1_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext1_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext1_mcd = {
+ .clks = pext1_clks,
+ .num_clks = ARRAY_SIZE(pext1_clks),
+ .rst_desc = &pext1_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_pextp[] = {
+ { .compatible = "mediatek,mt8196-pextp0cfg-ao", .data = &pext_mcd },
+ { .compatible = "mediatek,mt8196-pextp1cfg-ao", .data = &pext1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_pextp);
+
+static struct platform_driver clk_mt8196_pextp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-pextp",
+ .of_match_table = of_match_clk_mt8196_pextp,
+ },
+};
+
+module_platform_driver(clk_mt8196_pextp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 PCIe transmit PHY clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen.c b/drivers/clk/mediatek/clk-mt8196-topckgen.c
new file mode 100644
index 000000000000..6ace11ef6b69
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CLK_CFG_UPDATE 0x0004
+#define CLK_CFG_UPDATE1 0x0008
+#define CLK_CFG_UPDATE2 0x000c
+#define CLK_CFG_0 0x0010
+#define CLK_CFG_0_SET 0x0014
+#define CLK_CFG_0_CLR 0x0018
+#define CLK_CFG_1 0x0020
+#define CLK_CFG_1_SET 0x0024
+#define CLK_CFG_1_CLR 0x0028
+#define CLK_CFG_2 0x0030
+#define CLK_CFG_2_SET 0x0034
+#define CLK_CFG_2_CLR 0x0038
+#define CLK_CFG_3 0x0040
+#define CLK_CFG_3_SET 0x0044
+#define CLK_CFG_3_CLR 0x0048
+#define CLK_CFG_4 0x0050
+#define CLK_CFG_4_SET 0x0054
+#define CLK_CFG_4_CLR 0x0058
+#define CLK_CFG_5 0x0060
+#define CLK_CFG_5_SET 0x0064
+#define CLK_CFG_5_CLR 0x0068
+#define CLK_CFG_6 0x0070
+#define CLK_CFG_6_SET 0x0074
+#define CLK_CFG_6_CLR 0x0078
+#define CLK_CFG_7 0x0080
+#define CLK_CFG_7_SET 0x0084
+#define CLK_CFG_7_CLR 0x0088
+#define CLK_CFG_8 0x0090
+#define CLK_CFG_8_SET 0x0094
+#define CLK_CFG_8_CLR 0x0098
+#define CLK_CFG_9 0x00a0
+#define CLK_CFG_9_SET 0x00a4
+#define CLK_CFG_9_CLR 0x00a8
+#define CLK_CFG_10 0x00b0
+#define CLK_CFG_10_SET 0x00b4
+#define CLK_CFG_10_CLR 0x00b8
+#define CLK_CFG_11 0x00c0
+#define CLK_CFG_11_SET 0x00c4
+#define CLK_CFG_11_CLR 0x00c8
+#define CLK_CFG_12 0x00d0
+#define CLK_CFG_12_SET 0x00d4
+#define CLK_CFG_12_CLR 0x00d8
+#define CLK_CFG_13 0x00e0
+#define CLK_CFG_13_SET 0x00e4
+#define CLK_CFG_13_CLR 0x00e8
+#define CLK_CFG_14 0x00f0
+#define CLK_CFG_14_SET 0x00f4
+#define CLK_CFG_14_CLR 0x00f8
+#define CLK_CFG_15 0x0100
+#define CLK_CFG_15_SET 0x0104
+#define CLK_CFG_15_CLR 0x0108
+#define CLK_CFG_16 0x0110
+#define CLK_CFG_16_SET 0x0114
+#define CLK_CFG_16_CLR 0x0118
+#define CLK_CFG_17 0x0120
+#define CLK_CFG_17_SET 0x0124
+#define CLK_CFG_17_CLR 0x0128
+#define CLK_CFG_18 0x0130
+#define CLK_CFG_18_SET 0x0134
+#define CLK_CFG_18_CLR 0x0138
+#define CLK_CFG_19 0x0140
+#define CLK_CFG_19_SET 0x0144
+#define CLK_CFG_19_CLR 0x0148
+#define CLK_AUDDIV_0 0x020c
+#define CLK_FENC_STATUS_MON_0 0x0270
+#define CLK_FENC_STATUS_MON_1 0x0274
+#define CLK_FENC_STATUS_MON_2 0x0278
+
+/* MUX SHIFT */
+#define TOP_MUX_AXI_SHIFT 0
+#define TOP_MUX_MEM_SUB_SHIFT 1
+#define TOP_MUX_IO_NOC_SHIFT 2
+#define TOP_MUX_PERI_AXI_SHIFT 3
+#define TOP_MUX_UFS_PEXTP0_AXI_SHIFT 4
+#define TOP_MUX_PEXTP1_USB_AXI_SHIFT 5
+#define TOP_MUX_PERI_FMEM_SUB_SHIFT 6
+#define TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT 7
+#define TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT 8
+#define TOP_MUX_PERI_NOC_SHIFT 9
+#define TOP_MUX_EMI_N_SHIFT 10
+#define TOP_MUX_EMI_S_SHIFT 11
+#define TOP_MUX_AP2CONN_HOST_SHIFT 14
+#define TOP_MUX_ATB_SHIFT 15
+#define TOP_MUX_CIRQ_SHIFT 16
+#define TOP_MUX_PBUS_156M_SHIFT 17
+#define TOP_MUX_EFUSE_SHIFT 20
+#define TOP_MUX_MCU_L3GIC_SHIFT 21
+#define TOP_MUX_MCU_INFRA_SHIFT 22
+#define TOP_MUX_DSP_SHIFT 23
+#define TOP_MUX_MFG_REF_SHIFT 24
+#define TOP_MUX_MFG_EB_SHIFT 26
+#define TOP_MUX_UART_SHIFT 27
+#define TOP_MUX_SPI0_BCLK_SHIFT 28
+#define TOP_MUX_SPI1_BCLK_SHIFT 29
+#define TOP_MUX_SPI2_BCLK_SHIFT 30
+#define TOP_MUX_SPI3_BCLK_SHIFT 0
+#define TOP_MUX_SPI4_BCLK_SHIFT 1
+#define TOP_MUX_SPI5_BCLK_SHIFT 2
+#define TOP_MUX_SPI6_BCLK_SHIFT 3
+#define TOP_MUX_SPI7_BCLK_SHIFT 4
+#define TOP_MUX_MSDC30_1_SHIFT 7
+#define TOP_MUX_MSDC30_2_SHIFT 8
+#define TOP_MUX_DISP_PWM_SHIFT 9
+#define TOP_MUX_USB_TOP_1P_SHIFT 10
+#define TOP_MUX_SSUSB_XHCI_1P_SHIFT 11
+#define TOP_MUX_SSUSB_FMCNT_P1_SHIFT 12
+#define TOP_MUX_I2C_PERI_SHIFT 13
+#define TOP_MUX_I2C_EAST_SHIFT 14
+#define TOP_MUX_I2C_WEST_SHIFT 15
+#define TOP_MUX_I2C_NORTH_SHIFT 16
+#define TOP_MUX_AES_UFSFDE_SHIFT 17
+#define TOP_MUX_UFS_SHIFT 18
+#define TOP_MUX_AUD_1_SHIFT 21
+#define TOP_MUX_AUD_2_SHIFT 22
+#define TOP_MUX_ADSP_SHIFT 23
+#define TOP_MUX_ADSP_UARTHUB_B_SHIFT 24
+#define TOP_MUX_DPMAIF_MAIN_SHIFT 25
+#define TOP_MUX_PWM_SHIFT 26
+#define TOP_MUX_MCUPM_SHIFT 27
+#define TOP_MUX_SFLASH_SHIFT 28
+#define TOP_MUX_IPSEAST_SHIFT 29
+#define TOP_MUX_TL_SHIFT 0
+#define TOP_MUX_TL_P1_SHIFT 1
+#define TOP_MUX_TL_P2_SHIFT 2
+#define TOP_MUX_EMI_INTERFACE_546_SHIFT 3
+#define TOP_MUX_SDF_SHIFT 4
+#define TOP_MUX_UARTHUB_BCLK_SHIFT 5
+#define TOP_MUX_DPSW_CMP_26M_SHIFT 6
+#define TOP_MUX_SMAPCK_SHIFT 7
+#define TOP_MUX_SSR_PKA_SHIFT 8
+#define TOP_MUX_SSR_DMA_SHIFT 9
+#define TOP_MUX_SSR_KDF_SHIFT 10
+#define TOP_MUX_SSR_RNG_SHIFT 11
+#define TOP_MUX_SPU0_SHIFT 12
+#define TOP_MUX_SPU1_SHIFT 13
+#define TOP_MUX_DXCC_SHIFT 14
+
+/* CKSTA REG */
+#define CKSTA_REG 0x01c8
+#define CKSTA_REG1 0x01cc
+#define CKSTA_REG2 0x01d0
+
+/* DIVIDER REG */
+#define CLK_AUDDIV_2 0x0214
+#define CLK_AUDDIV_3 0x0220
+#define CLK_AUDDIV_4 0x0224
+#define CLK_AUDDIV_5 0x0228
+
+/* HW Voter REG */
+#define HWV_CG_0_SET 0x0000
+#define HWV_CG_0_CLR 0x0004
+#define HWV_CG_0_DONE 0x2c00
+#define HWV_CG_1_SET 0x0008
+#define HWV_CG_1_CLR 0x000c
+#define HWV_CG_1_DONE 0x2c04
+#define HWV_CG_2_SET 0x0010
+#define HWV_CG_2_CLR 0x0014
+#define HWV_CG_2_DONE 0x2c08
+#define HWV_CG_3_SET 0x0018
+#define HWV_CG_3_CLR 0x001c
+#define HWV_CG_3_DONE 0x2c0c
+#define HWV_CG_4_SET 0x0020
+#define HWV_CG_4_CLR 0x0024
+#define HWV_CG_4_DONE 0x2c10
+#define HWV_CG_5_SET 0x0028
+#define HWV_CG_5_CLR 0x002c
+#define HWV_CG_5_DONE 0x2c14
+#define HWV_CG_6_SET 0x0030
+#define HWV_CG_6_CLR 0x0034
+#define HWV_CG_6_DONE 0x2c18
+#define HWV_CG_7_SET 0x0038
+#define HWV_CG_7_CLR 0x003c
+#define HWV_CG_7_DONE 0x2c1c
+#define HWV_CG_8_SET 0x0040
+#define HWV_CG_8_CLR 0x0044
+#define HWV_CG_8_DONE 0x2c20
+
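+/* Fixed-factor dividers from the PLL outputs, e.g. mainpll_d4_d2 = mainpll / 8 */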
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll", 1, 56),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll", 1, 104),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll", 1, 208),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll", 1, 416),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D10, "univpll_192m_d10", "univpll", 1, 130),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "ulposc", 1, 2),
+ FACTOR(CLK_TOP_OSC_D3, "osc_d3", "ulposc", 1, 3),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "ulposc", 1, 4),
+ FACTOR(CLK_TOP_OSC_D5, "osc_d5", "ulposc", 1, 5),
+ FACTOR(CLK_TOP_OSC_D7, "osc_d7", "ulposc", 1, 7),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "ulposc", 1, 8),
+ FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
+ FACTOR(CLK_TOP_OSC_D14, "osc_d14", "ulposc", 1, 14),
+ FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_OSC_D32, "osc_d32", "ulposc", 1, 32),
+ FACTOR(CLK_TOP_OSC_D40, "osc_d40", "ulposc", 1, 40),
+};
+
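+/* Parent lists are ordered to match the hardware mux selector values */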
+static const char * const axi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mem_sub_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d4",
+ "univpll_d4_d4",
+ "osc_d3",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const io_noc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d6_d2",
+ "mainpll_d9"
+};
+
+static const char * const shared_axi_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "mainpll_d5_d8",
+ "osc_d8",
+ "mainpll_d7_d4",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const shared_sub_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const p_noc_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const emi_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "emipll1_ck"
+};
+
+static const char * const ap2conn_host_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6"
+};
+
+static const char * const cirq_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4"
+};
+
+static const char * const pbus_156m_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "osc_d2",
+ "mainpll_d7"
+};
+
+static const char * const efuse_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const mcu_l3gic_parents[] = {
+ "clk26m",
+ "osc_d8",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mcu_infra_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d9",
+ "mainpll_d6"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "osc_d5",
+ "osc_d4",
+ "osc_d3",
+ "univpll_d6_d2",
+ "osc_d2",
+ "univpll_d5",
+ "osc"
+};
+
+static const char * const mfg_ref_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const mfg_eb_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const spi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_192m",
+ "univpll_d6_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "osc_d32",
+ "osc_d8",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "osc_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const usb_1p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4"
+};
+
+static const char * const usb_fmcnt_p1_parents[] = {
+ "clk26m",
+ "univpll_192m_d4"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const aes_ufsfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d4"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "vlp_apll1"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "vlp_apll2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "adsppll"
+};
+
+static const char * const adsp_uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpmaif_main_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const ipseast_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "mainpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2"
+};
+
+static const char * const md_emi_parents[] = {
+ "clk26m",
+ "mainpll_d4"
+};
+
+static const char * const sdf_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpsw_cmp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const smap_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8"
+};
+
+static const char * const ssr_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const ssr_kdf_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7"
+};
+
+static const char * const ssr_rng_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const spu_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2"
+};
+
+static const char * const apll_m_parents[] = {
+ "aud_1",
+ "aud_2"
+};
+
+static const char * const sflash_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
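+/*
+ * Register layout observed throughout the table below: each CLK_CFG_n
+ * word packs up to four mux select fields at bit offsets 0/8/16/24,
+ * and the gate (PDN) bit of a field sits at its offset + 7. Fields are
+ * written through the _SET/_CLR mirrors and latched by the per-mux bit
+ * in CLK_CFG_UPDATE/UPDATE1/UPDATE2.
+ */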
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_CLR_SET_UPD(CLK_TOP_AXI, "axi",
+ axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SUB, "mem_sub",
+ mem_sub_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_IO_NOC, "io_noc",
+ io_noc_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_IO_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_AXI, "p_axi",
+ shared_axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_AXI_SHIFT),
+ /* CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_TOP_UFS_PEXTP0_AXI, "ufs_pextp0_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXTP0_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_AXI, "pextp1_usb_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 8, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_FMEM_SUB, "p_fmem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_FMEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXPT0_MEM_SUB, "ufs_pexpt0_mem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 24, 4,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT),
+ /* CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_MEM_SUB, "pextp1_usb_mem_sub",
+ shared_sub_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_NOC, "p_noc",
+ p_noc_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_N, "emi_n",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_N_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_S, "emi_s",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_S_SHIFT),
+ /* CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_TOP_AP2CONN_HOST, "ap2conn_host",
+ ap2conn_host_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 1,
+ CLK_CFG_UPDATE, TOP_MUX_AP2CONN_HOST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_ATB, "atb",
+ atb_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 24, 2,
+ CLK_CFG_UPDATE, TOP_MUX_ATB_SHIFT),
+ /* CLK_CFG_4 */
+ MUX_CLR_SET_UPD(CLK_TOP_CIRQ, "cirq",
+ cirq_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2,
+ CLK_CFG_UPDATE, TOP_MUX_CIRQ_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PBUS_156M, "pbus_156m",
+ pbus_156m_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_PBUS_156M_SHIFT),
+ /* CLK_CFG_5 */
+ MUX_CLR_SET_UPD(CLK_TOP_EFUSE, "efuse",
+ efuse_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 0, 1,
+ CLK_CFG_UPDATE, TOP_MUX_EFUSE_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCL3GIC, "mcu_l3gic",
+ mcu_l3gic_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_L3GIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCINFRA, "mcu_infra",
+ mcu_infra_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_INFRA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_DSP, "dsp",
+ dsp_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_DSP_SHIFT),
+ /* CLK_CFG_6 */
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(CLK_TOP_MFG_REF, "mfg_ref", mfg_ref_parents,
+ NULL, ARRAY_SIZE(mfg_ref_parents),
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE, TOP_MUX_MFG_REF_SHIFT,
+ CLK_FENC_STATUS_MON_0, 7, CLK_IGNORE_UNUSED),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_EB, "mfg_eb",
+ mfg_eb_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 16, 2,
+ 23, CLK_CFG_UPDATE, TOP_MUX_MFG_EB_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UART, "uart", uart_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ HWV_CG_3_DONE, HWV_CG_3_SET, HWV_CG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, TOP_MUX_UART_SHIFT,
+ CLK_FENC_STATUS_MON_0, 4),
+ /* CLK_CFG_7 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI0_BCLK, "spi0_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE, TOP_MUX_SPI0_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 3),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI1_BCLK, "spi1_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, TOP_MUX_SPI1_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 2),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI2_BCLK, "spi2_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE, TOP_MUX_SPI2_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 1),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI3_BCLK, "spi3_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI3_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI4_BCLK, "spi4_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_SPI4_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI5_BCLK, "spi5_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_SPI5_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI6_BCLK, "spi6_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_SPI6_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI7_BCLK, "spi7_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI7_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 28),
+ /* CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_1, "msdc30_1", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_2, "msdc30_2", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 24),
+ /* CLK_CFG_10 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DISP_PWM, "disp_pwm", disp_pwm_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_DISP_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 23),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "usb_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 22),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_XHCI_1P, "usb_xhci_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 21),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_FMCNT_P1, "usb_fmcnt_p1", usb_fmcnt_p1_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 24, 1, 31, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_FMCNT_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 20),
+ /* CLK_CFG_11 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_P, "i2c_p", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_I2C_PERI_SHIFT,
+ CLK_FENC_STATUS_MON_1, 19),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_EAST, "i2c_east", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_I2C_EAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 18),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_WEST, "i2c_west", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_I2C_WEST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 17),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_I2C_NORTH, "i2c_north", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ HWV_CG_6_DONE, HWV_CG_6_SET, HWV_CG_6_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_I2C_NORTH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "aes_ufsfde", aes_ufsfde_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_AES_UFSFDE_SHIFT,
+ CLK_FENC_STATUS_MON_1, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_UFS, "ufs", ufs_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_UFS_SHIFT,
+ CLK_FENC_STATUS_MON_1, 14),
+ /* CLK_CFG_13 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_1, "aud_1", aud_1_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE1, TOP_MUX_AUD_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 11),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_2, "aud_2", aud_2_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_AUD_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 10),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_ADSP, "adsp", adsp_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_ADSP_SHIFT,
+ CLK_FENC_STATUS_MON_1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_UARTHUB_B, "adsp_uarthub_b",
+ adsp_uarthub_b_parents, CLK_CFG_13, CLK_CFG_13_SET,
+ CLK_CFG_13_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE1, TOP_MUX_ADSP_UARTHUB_B_SHIFT),
+ /* CLK_CFG_14 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "dpmaif_main", dpmaif_main_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 0, 4, 7, CLK_CFG_UPDATE1, TOP_MUX_DPMAIF_MAIN_SHIFT,
+ CLK_FENC_STATUS_MON_1, 7),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_PWM, "pwm", pwm_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE1, TOP_MUX_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 6),
+ MUX_CLR_SET_UPD(CLK_TOP_MCUPM, "mcupm",
+ mcupm_parents, CLK_CFG_14, CLK_CFG_14_SET,
+ CLK_CFG_14_CLR, 16, 3,
+ CLK_CFG_UPDATE1, TOP_MUX_MCUPM_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_SFLASH, "sflash", sflash_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE1, TOP_MUX_SFLASH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 4),
+ /* CLK_CFG_15 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_IPSEAST, "ipseast", ipseast_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_IPSEAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 3),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL, "tl", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 16, 2, 23, CLK_CFG_UPDATE2, TOP_MUX_TL_SHIFT,
+ CLK_FENC_STATUS_MON_1, 1),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P1, "tl_p1", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_TL_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 0),
+ /* CLK_CFG_16 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P2, "tl_p2", tl_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE2, TOP_MUX_TL_P2_SHIFT,
+ CLK_FENC_STATUS_MON_2, 31),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_INTERFACE_546, "emi_interface_546",
+ md_emi_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_EMI_INTERFACE_546_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SDF, "sdf",
+ sdf_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SDF_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UARTHUB_BCLK, "uarthub_b", uarthub_b_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ HWV_CG_7_DONE, HWV_CG_7_SET, HWV_CG_7_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_UARTHUB_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_2, 28),
+ /* CLK_CFG_17 */
+ MUX_CLR_SET_UPD(CLK_TOP_DPSW_CMP_26M, "dpsw_cmp_26m",
+ dpsw_cmp_26m_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 0, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_DPSW_CMP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SMAP, "smap",
+ smap_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_SMAPCK_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_PKA, "ssr_pka",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_PKA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_DMA, "ssr_dma",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_DMA_SHIFT),
+ /* CLK_CFG_18 */
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_KDF, "ssr_kdf",
+ ssr_kdf_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_KDF_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_RNG, "ssr_rng",
+ ssr_rng_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 8, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_RNG_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU0, "spu0",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU0_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU1, "spu1",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU1_SHIFT),
+ /* CLK_CFG_19 */
+ MUX_CLR_SET_UPD(CLK_TOP_DXCC, "dxcc",
+ dxcc_parents, CLK_CFG_19, CLK_CFG_19_SET,
+ CLK_CFG_19_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_DXCC_SHIFT),
+};
+
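+/*
+ * Audio I2S/TDM composites: each output picks the aud_1 (APLL1) or
+ * aud_2 (APLL2) rail with a 1-bit mux in CLK_AUDDIV_0 (bits 16..29),
+ * runs through an 8-bit divider field in CLK_AUDDIV_2..5 and is gated
+ * by the matching low bit of CLK_AUDDIV_0.
+ */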
+static const struct mtk_composite top_aud_divs[] = {
+ /* CLK_AUDDIV_2 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN0, "apll_i2sin0_m", apll_m_parents,
+ CLK_AUDDIV_0, 16, 1, CLK_AUDDIV_2, 0, 8, CLK_AUDDIV_0, 0),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN1, "apll_i2sin1_m", apll_m_parents,
+ CLK_AUDDIV_0, 17, 1, CLK_AUDDIV_2, 8, 8, CLK_AUDDIV_0, 1),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN2, "apll_i2sin2_m", apll_m_parents,
+ CLK_AUDDIV_0, 18, 1, CLK_AUDDIV_2, 16, 8, CLK_AUDDIV_0, 2),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN3, "apll_i2sin3_m", apll_m_parents,
+ CLK_AUDDIV_0, 19, 1, CLK_AUDDIV_2, 24, 8, CLK_AUDDIV_0, 3),
+ /* CLK_AUDDIV_3 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN4, "apll_i2sin4_m", apll_m_parents,
+ CLK_AUDDIV_0, 20, 1, CLK_AUDDIV_3, 0, 8, CLK_AUDDIV_0, 4),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN6, "apll_i2sin6_m", apll_m_parents,
+ CLK_AUDDIV_0, 21, 1, CLK_AUDDIV_3, 8, 8, CLK_AUDDIV_0, 5),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT0, "apll_i2sout0_m", apll_m_parents,
+ CLK_AUDDIV_0, 22, 1, CLK_AUDDIV_3, 16, 8, CLK_AUDDIV_0, 6),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT1, "apll_i2sout1_m", apll_m_parents,
+ CLK_AUDDIV_0, 23, 1, CLK_AUDDIV_3, 24, 8, CLK_AUDDIV_0, 7),
+ /* CLK_AUDDIV_4 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT2, "apll_i2sout2_m", apll_m_parents,
+ CLK_AUDDIV_0, 24, 1, CLK_AUDDIV_4, 0, 8, CLK_AUDDIV_0, 8),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT3, "apll_i2sout3_m", apll_m_parents,
+ CLK_AUDDIV_0, 25, 1, CLK_AUDDIV_4, 8, 8, CLK_AUDDIV_0, 9),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT4, "apll_i2sout4_m", apll_m_parents,
+ CLK_AUDDIV_0, 26, 1, CLK_AUDDIV_4, 16, 8, CLK_AUDDIV_0, 10),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT6, "apll_i2sout6_m", apll_m_parents,
+ CLK_AUDDIV_0, 27, 1, CLK_AUDDIV_4, 24, 8, CLK_AUDDIV_0, 11),
+ /* CLK_AUDDIV_5 */
+ MUX_DIV_GATE(CLK_TOP_APLL_FMI2S, "apll_fmi2s_m", apll_m_parents,
+ CLK_AUDDIV_0, 28, 1, CLK_AUDDIV_5, 0, 8, CLK_AUDDIV_0, 12),
+ MUX(CLK_TOP_APLL_TDMOUT, "apll_tdmout_m",
+ apll_m_parents, CLK_AUDDIV_0, 29, 1),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_M, "apll12_div_tdmout_m",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 13, CLK_AUDDIV_5, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_B, "apll12_div_tdmout_b",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 14, CLK_AUDDIV_5, 8, 16),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs)
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen2.c b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
new file mode 100644
index 000000000000..6df93d7fbf91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CKSYS2_CLK_CFG_UPDATE 0x0004
+#define CKSYS2_CLK_CFG_0 0x0010
+#define CKSYS2_CLK_CFG_0_SET 0x0014
+#define CKSYS2_CLK_CFG_0_CLR 0x0018
+#define CKSYS2_CLK_CFG_1 0x0020
+#define CKSYS2_CLK_CFG_1_SET 0x0024
+#define CKSYS2_CLK_CFG_1_CLR 0x0028
+#define CKSYS2_CLK_CFG_2 0x0030
+#define CKSYS2_CLK_CFG_2_SET 0x0034
+#define CKSYS2_CLK_CFG_2_CLR 0x0038
+#define CKSYS2_CLK_CFG_3 0x0040
+#define CKSYS2_CLK_CFG_3_SET 0x0044
+#define CKSYS2_CLK_CFG_3_CLR 0x0048
+#define CKSYS2_CLK_CFG_4 0x0050
+#define CKSYS2_CLK_CFG_4_SET 0x0054
+#define CKSYS2_CLK_CFG_4_CLR 0x0058
+#define CKSYS2_CLK_CFG_5 0x0060
+#define CKSYS2_CLK_CFG_5_SET 0x0064
+#define CKSYS2_CLK_CFG_5_CLR 0x0068
+#define CKSYS2_CLK_CFG_6 0x0070
+#define CKSYS2_CLK_CFG_6_SET 0x0074
+#define CKSYS2_CLK_CFG_6_CLR 0x0078
+#define CKSYS2_CLK_FENC_STATUS_MON_0 0x0174
+
+/* MUX SHIFT */
+#define TOP_MUX_SENINF0_SHIFT 0
+#define TOP_MUX_SENINF1_SHIFT 1
+#define TOP_MUX_SENINF2_SHIFT 2
+#define TOP_MUX_SENINF3_SHIFT 3
+#define TOP_MUX_SENINF4_SHIFT 4
+#define TOP_MUX_SENINF5_SHIFT 5
+#define TOP_MUX_IMG1_SHIFT 6
+#define TOP_MUX_IPE_SHIFT 7
+#define TOP_MUX_CAM_SHIFT 8
+#define TOP_MUX_CAMTM_SHIFT 9
+#define TOP_MUX_DPE_SHIFT 10
+#define TOP_MUX_VDEC_SHIFT 11
+#define TOP_MUX_CCUSYS_SHIFT 12
+#define TOP_MUX_CCUTM_SHIFT 13
+#define TOP_MUX_VENC_SHIFT 14
+#define TOP_MUX_DVO_SHIFT 15
+#define TOP_MUX_DVO_FAVT_SHIFT 16
+#define TOP_MUX_DP1_SHIFT 17
+#define TOP_MUX_DP0_SHIFT 18
+#define TOP_MUX_DISP_SHIFT 19
+#define TOP_MUX_MDP_SHIFT 20
+#define TOP_MUX_MMINFRA_SHIFT 21
+#define TOP_MUX_MMINFRA_SNOC_SHIFT 22
+#define TOP_MUX_MMUP_SHIFT 23
+#define TOP_MUX_MMINFRA_AO_SHIFT 26
+
+/* HW Voter REG */
+#define HWV_CG_30_SET 0x0058
+#define HWV_CG_30_CLR 0x005c
+#define HWV_CG_30_DONE 0x2c2c
+
+#define MM_HWV_CG_30_SET 0x00f0
+#define MM_HWV_CG_30_CLR 0x00f4
+#define MM_HWV_CG_30_DONE 0x2c78
+#define MM_HWV_CG_31_SET 0x00f8
+#define MM_HWV_CG_31_CLR 0x00fc
+#define MM_HWV_CG_31_DONE 0x2c7c
+#define MM_HWV_CG_32_SET 0x0100
+#define MM_HWV_CG_32_CLR 0x0104
+#define MM_HWV_CG_32_DONE 0x2c80
+#define MM_HWV_CG_33_SET 0x0108
+#define MM_HWV_CG_33_CLR 0x010c
+#define MM_HWV_CG_33_DONE 0x2c84
+#define MM_HWV_CG_34_SET 0x0110
+#define MM_HWV_CG_34_CLR 0x0114
+#define MM_HWV_CG_34_DONE 0x2c88
+#define MM_HWV_CG_35_SET 0x0118
+#define MM_HWV_CG_35_CLR 0x011c
+#define MM_HWV_CG_35_DONE 0x2c8c
+#define MM_HWV_CG_36_SET 0x0120
+#define MM_HWV_CG_36_CLR 0x0124
+#define MM_HWV_CG_36_DONE 0x2c90
+#define MM_HWV_MUX_UPDATE_31_0 0x0240
+
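+/*
+ * Fixed post-dividers of the GP2 PLLs: a _dN suffix divides the parent
+ * rate by N, and _dN_dM by N * M.
+ */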
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP2_MAINPLL2_D2, "mainpll2_d2", "mainpll2", 1, 2),
+ FACTOR(CLK_TOP2_MAINPLL2_D3, "mainpll2_d3", "mainpll2", 1, 3),
+ FACTOR(CLK_TOP2_MAINPLL2_D4, "mainpll2_d4", "mainpll2", 1, 4),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D2, "mainpll2_d4_d2", "mainpll2", 1, 8),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D4, "mainpll2_d4_d4", "mainpll2", 1, 16),
+ FACTOR(CLK_TOP2_MAINPLL2_D5, "mainpll2_d5", "mainpll2", 1, 5),
+ FACTOR(CLK_TOP2_MAINPLL2_D5_D2, "mainpll2_d5_d2", "mainpll2", 1, 10),
+ FACTOR(CLK_TOP2_MAINPLL2_D6, "mainpll2_d6", "mainpll2", 1, 6),
+ FACTOR(CLK_TOP2_MAINPLL2_D6_D2, "mainpll2_d6_d2", "mainpll2", 1, 12),
+ FACTOR(CLK_TOP2_MAINPLL2_D7, "mainpll2_d7", "mainpll2", 1, 7),
+ FACTOR(CLK_TOP2_MAINPLL2_D7_D2, "mainpll2_d7_d2", "mainpll2", 1, 14),
+ FACTOR(CLK_TOP2_MAINPLL2_D9, "mainpll2_d9", "mainpll2", 1, 9),
+ FACTOR(CLK_TOP2_UNIVPLL2_D3, "univpll2_d3", "univpll2", 1, 3),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4, "univpll2_d4", "univpll2", 1, 4),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4_D2, "univpll2_d4_d2", "univpll2", 1, 8),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5, "univpll2_d5", "univpll2", 1, 5),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5_D2, "univpll2_d5_d2", "univpll2", 1, 10),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6, "univpll2_d6", "univpll2", 1, 6),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D2, "univpll2_d6_d2", "univpll2", 1, 12),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D4, "univpll2_d6_d4", "univpll2", 1, 24),
+ FACTOR(CLK_TOP2_UNIVPLL2_D7, "univpll2_d7", "univpll2", 1, 7),
+ FACTOR(CLK_TOP2_IMGPLL_D2, "imgpll_d2", "imgpll", 1, 2),
+ FACTOR(CLK_TOP2_IMGPLL_D4, "imgpll_d4", "imgpll", 1, 4),
+ FACTOR(CLK_TOP2_IMGPLL_D5, "imgpll_d5", "imgpll", 1, 5),
+ FACTOR(CLK_TOP2_IMGPLL_D5_D2, "imgpll_d5_d2", "imgpll", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D3, "mmpll2_d3", "mmpll2", 1, 3),
+ FACTOR(CLK_TOP2_MMPLL2_D4, "mmpll2_d4", "mmpll2", 1, 4),
+ FACTOR(CLK_TOP2_MMPLL2_D4_D2, "mmpll2_d4_d2", "mmpll2", 1, 8),
+ FACTOR(CLK_TOP2_MMPLL2_D5, "mmpll2_d5", "mmpll2", 1, 5),
+ FACTOR(CLK_TOP2_MMPLL2_D5_D2, "mmpll2_d5_d2", "mmpll2", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D6, "mmpll2_d6", "mmpll2", 1, 6),
+ FACTOR(CLK_TOP2_MMPLL2_D6_D2, "mmpll2_d6_d2", "mmpll2", 1, 12),
+ FACTOR(CLK_TOP2_MMPLL2_D7, "mmpll2_d7", "mmpll2", 1, 7),
+ FACTOR(CLK_TOP2_MMPLL2_D9, "mmpll2_d9", "mmpll2", 1, 9),
+ FACTOR(CLK_TOP2_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL3_D2, "tvdpll3_d2", "tvdpll3", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL3_D4, "tvdpll3_d4", "tvdpll3", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL3_D8, "tvdpll3_d8", "tvdpll3", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL3_D16, "tvdpll3_d16", "tvdpll3", 1, 16),
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d8",
+ "ck_osc_d5",
+ "ck_osc_d4",
+ "univpll2_d6_d2",
+ "mainpll2_d9",
+ "ck_osc_d2",
+ "mainpll2_d4_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "univpll2_d7",
+ "mainpll2_d6",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "univpll2_d5"
+};
+
+static const char * const img1_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "mmpll2_d6_d2",
+ "ck_osc_d2",
+ "imgpll_d5_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "imgpll_d5",
+ "ck_mainpll_d4",
+ "mmpll2_d5",
+ "imgpll_d4"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "univpll2_d7",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const dpe_parents[] = {
+ "clk26m",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d4_d4",
+ "mainpll2_d7_d2",
+ "mainpll2_d6_d2",
+ "mainpll2_d5_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mainpll2_d4",
+ "imgpll_d2"
+};
+
+static const char * const ccusys_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const ccutm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mainpll2_d5_d2",
+ "univpll2_d5_d2",
+ "mainpll2_d4_d2",
+ "mmpll2_d9",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "univpll2_d3"
+};
+
+static const char * const dp1_parents[] = {
+ "clk26m",
+ "tvdpll2_d16",
+ "tvdpll2_d8",
+ "tvdpll2_d4",
+ "tvdpll2_d2"
+};
+
+static const char * const dp0_parents[] = {
+ "clk26m",
+ "tvdpll1_d16",
+ "tvdpll1_d8",
+ "tvdpll1_d4",
+ "ck_tvdpll1_d2"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d4_d2",
+ "ck_mainpll_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d5_d2",
+ "mmpll2_d6_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d9",
+ "mmpll2_d6_d2",
+ "mainpll2_d4_d2",
+ "ck_mainpll_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const mminfra_snoc_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d9",
+ "ck_mainpll_d7",
+ "ck_mainpll_d6",
+ "mmpll2_d4_d2",
+ "ck_mainpll_d5",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mmpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3",
+ "mmpll2_d3",
+ "mainpll2_d2"
+};
+
+static const char * const mmup_parents[] = {
+ "clk26m",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "ck_osc_d2",
+ "ck_osc",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_ao_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d3"
+};
+
+static const char * const dvo_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "tvdpll3_d2"
+};
+
+static const char * const dvo_favt_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "vlp_apll1",
+ "vlp_apll2",
+ "tvdpll3_d2"
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CKSYS2_CLK_CFG_0 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF0, "seninf0", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF1, "seninf1", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF2, "seninf2", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF2_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF3, "seninf3", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF3_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 28),
+ /* CKSYS2_CLK_CFG_1 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF4, "seninf4", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF4_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 27),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF5, "seninf5", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF5_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 26),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IMG1, "img1", img1_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IMG1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 25),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IPE, "ipe", ipe_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 24),
+ /* CKSYS2_CLK_CFG_2 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAM, "cam", cam_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 23),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAMTM, "camtm", camtm_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAMTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 22),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DPE, "dpe", dpe_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 21),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VDEC, "vdec", vdec_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VDEC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 20),
+ /* CKSYS2_CLK_CFG_3 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUSYS, "ccusys", ccusys_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUSYS_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 19),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUTM, "ccutm", ccutm_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 18),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VENC, "venc", venc_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VENC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 17),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO, "dvo", dvo_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 16),
+ /* CKSYS2_CLK_CFG_4 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO_FAVT, "dvo_favt", dvo_favt_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 0, 3, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_FAVT_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP1, "dp1", dp1_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 14),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP0, "dp0", dp0_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 16, 3, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DISP, "disp", disp_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ MM_HWV_CG_34_DONE, MM_HWV_CG_34_SET, MM_HWV_CG_34_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DISP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 12),
+ /* CKSYS2_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MDP, "mdp", mdp_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MDP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA, "mminfra", mminfra_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_SNOC, "mminfra_snoc", mminfra_snoc_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SNOC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 9),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_MMUP, "mmup", mmup_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMUP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 8),
+ /* CKSYS2_CLK_CFG_6 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_AO, "mminfra_ao", mminfra_ao_parents,
+ CKSYS2_CLK_CFG_6, CKSYS2_CLK_CFG_6_SET, CKSYS2_CLK_CFG_6_CLR,
+ MM_HWV_CG_36_DONE, MM_HWV_CG_36_SET, MM_HWV_CG_36_CLR,
+ 16, 2, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_AO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 5),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen-gp2", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck2",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 GP2 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ufs_ao.c b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
new file mode 100644
index 000000000000..0c04717b7b4b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define MT8196_UFSAO_RST0_SET_OFFSET 0x48
+#define MT8196_UFSAO_RST1_SET_OFFSET 0x148
+
+static const struct mtk_gate_regs ufsao0_cg_regs = {
+ .set_ofs = 0x108,
+ .clr_ofs = 0x10c,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs ufsao1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_UFSAO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_UFSAO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ufsao_clks[] = {
+ /* UFSAO0 */
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_UFS, "ufsao_ufshci_ufs", "ufs", 0),
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_AES, "ufsao_ufshci_aes", "aes_ufsfde", 1),
+ /* UFSAO1 */
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_TX_SYM, "ufsao_unipro_tx_sym", "clk26m", 0),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM0, "ufsao_unipro_rx_sym0", "clk26m", 1),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM1, "ufsao_unipro_rx_sym1", "clk26m", 2),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SYS, "ufsao_unipro_sys", "ufs", 3),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SAP, "ufsao_unipro_sap", "clk26m", 4),
+ GATE_UFSAO1(CLK_UFSAO_PHY_SAP, "ufsao_phy_sap", "clk26m", 8),
+};
+
+static u16 ufsao_rst_ofs[] = {
+ MT8196_UFSAO_RST0_SET_OFFSET,
+ MT8196_UFSAO_RST1_SET_OFFSET
+};
+
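+/*
+ * Reset indices are linearized across the two banks above:
+ * bank * RST_NR_PER_BANK + bit position in that bank's SET register.
+ */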
+static u16 ufsao_rst_idx_map[] = {
+ [MT8196_UFSAO_RST0_UFS_MPHY] = 8,
+ [MT8196_UFSAO_RST1_UFS_UNIPRO] = 1 * RST_NR_PER_BANK + 0,
+ [MT8196_UFSAO_RST1_UFS_CRYPTO] = 1 * RST_NR_PER_BANK + 1,
+ [MT8196_UFSAO_RST1_UFSHCI] = 1 * RST_NR_PER_BANK + 2,
+};
+
+static const struct mtk_clk_rst_desc ufsao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = ufsao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ufsao_rst_ofs),
+ .rst_idx_map = ufsao_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(ufsao_rst_idx_map),
+};
+
+static const struct mtk_clk_desc ufsao_mcd = {
+ .clks = ufsao_clks,
+ .num_clks = ARRAY_SIZE(ufsao_clks),
+ .rst_desc = &ufsao_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_ufs_ao[] = {
+ { .compatible = "mediatek,mt8196-ufscfg-ao", .data = &ufsao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ufs_ao);
+
+static struct platform_driver clk_mt8196_ufs_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-ufs-ao",
+ .of_match_table = of_match_clk_mt8196_ufs_ao,
+ },
+};
+
+module_platform_driver(clk_mt8196_ufs_ao_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 ufs_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdec.c b/drivers/clk/mediatek/clk-mt8196-vdec.c
new file mode 100644
index 000000000000..f8dcd84a2b58
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdec.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vde20_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vde20_hwv_regs = {
+ .set_ofs = 0x0088,
+ .clr_ofs = 0x008c,
+ .sta_ofs = 0x2c44,
+};
+
+static const struct mtk_gate_regs vde21_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde21_hwv_regs = {
+ .set_ofs = 0x0080,
+ .clr_ofs = 0x0084,
+ .sta_ofs = 0x2c40,
+};
+
+static const struct mtk_gate_regs vde22_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs vde22_hwv_regs = {
+ .set_ofs = 0x0078,
+ .clr_ofs = 0x007c,
+ .sta_ofs = 0x2c3c,
+};
+
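+/*
+ * The GATE_HWV_* gates pair the ordinary CG bank with a hardware voter
+ * (HWV) set/clr/done triplet, so enable/disable requests from the
+ * kernel are arbitrated by the voter instead of toggling the CG bit
+ * directly.
+ */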
+#define GATE_HWV_VDE20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde21_cg_regs, \
+ .hwv_regs = &vde21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE22(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde22_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde2_clks[] = {
+ /* VDE20 */
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN, "vde2_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_ACTIVE, "vde2_vdec_active", "vdec", 4),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN_ENG, "vde2_vdec_cken_eng", "vdec", 8),
+ /* VDE21 */
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN, "vde2_lat_cken", "vdec", 0),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_ACTIVE, "vde2_lat_active", "vdec", 4),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN_ENG, "vde2_lat_cken_eng", "vdec", 8),
+ /* VDE22 */
+ GATE_HWV_VDE22(CLK_VDE2_LARB1_CKEN, "vde2_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde2_mcd = {
+ .clks = vde2_clks,
+ .num_clks = ARRAY_SIZE(vde2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs vde10_hwv_regs = {
+ .set_ofs = 0x00a0,
+ .clr_ofs = 0x00a4,
+ .sta_ofs = 0x2c50,
+};
+
+static const struct mtk_gate_regs vde11_cg_regs = {
+ .set_ofs = 0x1e0,
+ .clr_ofs = 0x1e0,
+ .sta_ofs = 0x1e0,
+};
+
+static const struct mtk_gate_regs vde11_hwv_regs = {
+ .set_ofs = 0x00b0,
+ .clr_ofs = 0x00b4,
+ .sta_ofs = 0x2c58,
+};
+
+static const struct mtk_gate_regs vde12_cg_regs = {
+ .set_ofs = 0x1ec,
+ .clr_ofs = 0x1ec,
+ .sta_ofs = 0x1ec,
+};
+
+static const struct mtk_gate_regs vde12_hwv_regs = {
+ .set_ofs = 0x00a8,
+ .clr_ofs = 0x00ac,
+ .sta_ofs = 0x2c54,
+};
+
+static const struct mtk_gate_regs vde13_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde13_hwv_regs = {
+ .set_ofs = 0x0098,
+ .clr_ofs = 0x009c,
+ .sta_ofs = 0x2c4c,
+};
+
+static const struct mtk_gate_regs vde14_hwv_regs = {
+ .set_ofs = 0x0090,
+ .clr_ofs = 0x0094,
+ .sta_ofs = 0x2c48,
+};
+
+#define GATE_HWV_VDE10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde11_cg_regs, \
+ .hwv_regs = &vde11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE12(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde12_cg_regs, \
+ .hwv_regs = &vde12_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE13(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde13_cg_regs, \
+ .hwv_regs = &vde13_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE14(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde14_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde1_clks[] = {
+ /* VDE10 */
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN, "vde1_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_ACTIVE, "vde1_vdec_active", "vdec", 4),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN_ENG, "vde1_vdec_cken_eng", "vdec", 8),
+ /* VDE11 */
+ GATE_HWV_VDE11(CLK_VDE1_VDEC_SOC_IPS_EN, "vde1_vdec_soc_ips_en", "vdec", 0),
+ /* VDE12 */
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_EN, "vde1_aptv_en", "ck_tck_26m_mx9_ck", 0),
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_TOP_EN, "vde1_aptv_topen", "ck_tck_26m_mx9_ck", 1),
+ /* VDE13 */
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN, "vde1_lat_cken", "vdec", 0),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_ACTIVE, "vde1_lat_active", "vdec", 4),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN_ENG, "vde1_lat_cken_eng", "vdec", 8),
+ /* VDE14 */
+ GATE_HWV_VDE14(CLK_VDE1_LARB1_CKEN, "vde1_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde1_mcd = {
+ .clks = vde1_clks,
+ .num_clks = ARRAY_SIZE(vde1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdec[] = {
+ { .compatible = "mediatek,mt8196-vdecsys", .data = &vde2_mcd },
+ { .compatible = "mediatek,mt8196-vdecsys-soc", .data = &vde1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdec);
+
+static struct platform_driver clk_mt8196_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-vdec",
+ .of_match_table = of_match_clk_mt8196_vdec,
+ },
+};
+module_platform_driver(clk_mt8196_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Decoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
new file mode 100644
index 000000000000..fddb69d1c3eb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm_v_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm_v_hwv_regs = {
+ .set_ofs = 0x0030,
+ .clr_ofs = 0x0034,
+ .sta_ofs = 0x2c18,
+};
+
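+/*
+ * GATE_MM_AO_V gates carry CLK_IS_CRITICAL: the always-on display
+ * clocks must never be disabled by the clock framework.
+ */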
+#define GATE_MM_AO_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IS_CRITICAL, \
+ }
+
+#define GATE_HWV_MM_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .hwv_regs = &mm_v_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_v_clks[] = {
+ GATE_HWV_MM_V(CLK_MM_V_DISP_VDISP_AO_CONFIG, "mm_v_disp_vdisp_ao_config", "disp", 0),
+ GATE_HWV_MM_V(CLK_MM_V_DISP_DPC, "mm_v_disp_dpc", "disp", 16),
+ GATE_MM_AO_V(CLK_MM_V_SMI_SUB_SOMM0, "mm_v_smi_sub_somm0", "disp", 2),
+};
+
+static const struct mtk_clk_desc mm_v_mcd = {
+ .clks = mm_v_clks,
+ .num_clks = ARRAY_SIZE(mm_v_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdisp_ao[] = {
+ { .compatible = "mediatek,mt8196-vdisp-ao", .data = &mm_v_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdisp_ao);
+
+static struct platform_driver clk_mt8196_vdisp_ao_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-vdisp-ao",
+ .of_match_table = of_match_clk_mt8196_vdisp_ao,
+ },
+};
+module_platform_driver(clk_mt8196_vdisp_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 vdisp_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-venc.c b/drivers/clk/mediatek/clk-mt8196-venc.c
new file mode 100644
index 000000000000..13e2e36e945f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-venc.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ven10_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ven10_hwv_regs = {
+ .set_ofs = 0x00b8,
+ .clr_ofs = 0x00bc,
+ .sta_ofs = 0x2c5c,
+};
+
+static const struct mtk_gate_regs ven11_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ven11_hwv_regs = {
+ .set_ofs = 0x00c0,
+ .clr_ofs = 0x00c4,
+ .sta_ofs = 0x2c60,
+};
+
+#define GATE_VEN10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = (_flags) | \
+ CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN10(_id, _name, _parent, _shift) \
+ GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_HWV_VEN11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
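+/*
+ * Shared fabric clocks (larb, gals, venc core) are requested through
+ * the HW voter; the JPEG encoder/decoder gates are plain SW gates.
+ */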
+static const struct mtk_gate ven1_clks[] = {
+ /* VEN10 */
+ GATE_HWV_VEN10(CLK_VEN1_CKE0_LARB, "ven1_larb", "venc", 0),
+ GATE_HWV_VEN10(CLK_VEN1_CKE1_VENC, "ven1_venc", "venc", 4),
+ GATE_VEN10(CLK_VEN1_CKE2_JPGENC, "ven1_jpgenc", "venc", 8),
+ GATE_VEN10(CLK_VEN1_CKE3_JPGDEC, "ven1_jpgdec", "venc", 12),
+ GATE_VEN10(CLK_VEN1_CKE4_JPGDEC_C1, "ven1_jpgdec_c1", "venc", 16),
+ GATE_HWV_VEN10(CLK_VEN1_CKE5_GALS, "ven1_gals", "venc", 28),
+ GATE_HWV_VEN10(CLK_VEN1_CKE29_VENC_ADAB_CTRL, "ven1_venc_adab_ctrl",
+ "venc", 29),
+ GATE_HWV_VEN10_FLAGS(CLK_VEN1_CKE29_VENC_XPC_CTRL,
+ "ven1_venc_xpc_ctrl", "venc", 30,
+ CLK_IGNORE_UNUSED),
+ GATE_HWV_VEN10(CLK_VEN1_CKE6_GALS_SRAM, "ven1_gals_sram", "venc", 31),
+ /* VEN11 */
+ GATE_HWV_VEN11(CLK_VEN1_RES_FLAT, "ven1_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven1_mcd = {
+ .clks = ven1_clks,
+ .num_clks = ARRAY_SIZE(ven1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven20_hwv_regs = {
+ .set_ofs = 0x00c8,
+ .clr_ofs = 0x00cc,
+ .sta_ofs = 0x2c64,
+};
+
+static const struct mtk_gate_regs ven21_hwv_regs = {
+ .set_ofs = 0x00d0,
+ .clr_ofs = 0x00d4,
+ .sta_ofs = 0x2c68,
+};
+
+#define GATE_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven2_clks[] = {
+ /* VEN20 */
+ GATE_HWV_VEN20(CLK_VEN2_CKE0_LARB, "ven2_larb", "venc", 0),
+ GATE_HWV_VEN20(CLK_VEN2_CKE1_VENC, "ven2_venc", "venc", 4),
+ GATE_VEN20(CLK_VEN2_CKE2_JPGENC, "ven2_jpgenc", "venc", 8),
+ GATE_VEN20(CLK_VEN2_CKE3_JPGDEC, "ven2_jpgdec", "venc", 12),
+ GATE_HWV_VEN20(CLK_VEN2_CKE5_GALS, "ven2_gals", "venc", 28),
+ GATE_HWV_VEN20(CLK_VEN2_CKE29_VENC_XPC_CTRL, "ven2_venc_xpc_ctrl", "venc", 30),
+ GATE_HWV_VEN20(CLK_VEN2_CKE6_GALS_SRAM, "ven2_gals_sram", "venc", 31),
+ /* VEN21 */
+ GATE_HWV_VEN21(CLK_VEN2_RES_FLAT, "ven2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven2_mcd = {
+ .clks = ven2_clks,
+ .num_clks = ARRAY_SIZE(ven2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven_c20_hwv_regs = {
+ .set_ofs = 0x00d8,
+ .clr_ofs = 0x00dc,
+ .sta_ofs = 0x2c6c,
+};
+
+static const struct mtk_gate_regs ven_c21_hwv_regs = {
+ .set_ofs = 0x00e0,
+ .clr_ofs = 0x00e4,
+ .sta_ofs = 0x2c70,
+};
+
+#define GATE_HWV_VEN_C20(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven_c20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN_C21(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven_c21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven_c2_clks[] = {
+ /* VEN_C20 */
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE0_LARB, "ven_c2_larb", "venc", 0),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE1_VENC, "ven_c2_venc", "venc", 4),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE5_GALS, "ven_c2_gals", "venc", 28),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE29_VENC_XPC_CTRL, "ven_c2_venc_xpc_ctrl",
+ "venc", 30),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE6_GALS_SRAM, "ven_c2_gals_sram", "venc", 31),
+ /* VEN_C21 */
+ GATE_HWV_VEN_C21(CLK_VEN_C2_RES_FLAT, "ven_c2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven_c2_mcd = {
+ .clks = ven_c2_clks,
+ .num_clks = ARRAY_SIZE(ven_c2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_venc[] = {
+ { .compatible = "mediatek,mt8196-vencsys", .data = &ven1_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c1", .data = &ven2_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c2", .data = &ven_c2_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_venc);
+
+static struct platform_driver clk_mt8196_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-venc",
+ .of_match_table = of_match_clk_mt8196_venc,
+ },
+};
+module_platform_driver(clk_mt8196_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Encoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vlpckgen.c b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
new file mode 100644
index 000000000000..d59a8a9d9855
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+/* MUX SEL REG */
+#define VLP_CLK_CFG_UPDATE 0x0004
+#define VLP_CLK_CFG_UPDATE1 0x0008
+#define VLP_CLK_CFG_0 0x0010
+#define VLP_CLK_CFG_0_SET 0x0014
+#define VLP_CLK_CFG_0_CLR 0x0018
+#define VLP_CLK_CFG_1 0x0020
+#define VLP_CLK_CFG_1_SET 0x0024
+#define VLP_CLK_CFG_1_CLR 0x0028
+#define VLP_CLK_CFG_2 0x0030
+#define VLP_CLK_CFG_2_SET 0x0034
+#define VLP_CLK_CFG_2_CLR 0x0038
+#define VLP_CLK_CFG_3 0x0040
+#define VLP_CLK_CFG_3_SET 0x0044
+#define VLP_CLK_CFG_3_CLR 0x0048
+#define VLP_CLK_CFG_4 0x0050
+#define VLP_CLK_CFG_4_SET 0x0054
+#define VLP_CLK_CFG_4_CLR 0x0058
+#define VLP_CLK_CFG_5 0x0060
+#define VLP_CLK_CFG_5_SET 0x0064
+#define VLP_CLK_CFG_5_CLR 0x0068
+#define VLP_CLK_CFG_6 0x0070
+#define VLP_CLK_CFG_6_SET 0x0074
+#define VLP_CLK_CFG_6_CLR 0x0078
+#define VLP_CLK_CFG_7 0x0080
+#define VLP_CLK_CFG_7_SET 0x0084
+#define VLP_CLK_CFG_7_CLR 0x0088
+#define VLP_CLK_CFG_8 0x0090
+#define VLP_CLK_CFG_8_SET 0x0094
+#define VLP_CLK_CFG_8_CLR 0x0098
+#define VLP_CLK_CFG_9 0x00a0
+#define VLP_CLK_CFG_9_SET 0x00a4
+#define VLP_CLK_CFG_9_CLR 0x00a8
+#define VLP_CLK_CFG_10 0x00b0
+#define VLP_CLK_CFG_10_SET 0x00b4
+#define VLP_CLK_CFG_10_CLR 0x00b8
+#define VLP_OCIC_FENC_STATUS_MON_0 0x039c
+#define VLP_OCIC_FENC_STATUS_MON_1 0x03a0
+
+/* MUX SHIFT */
+#define TOP_MUX_SCP_SHIFT 0
+#define TOP_MUX_SCP_SPI_SHIFT 1
+#define TOP_MUX_SCP_IIC_SHIFT 2
+#define TOP_MUX_SCP_IIC_HS_SHIFT 3
+#define TOP_MUX_PWRAP_ULPOSC_SHIFT 4
+#define TOP_MUX_SPMI_M_TIA_32K_SHIFT 5
+#define TOP_MUX_APXGPT_26M_B_SHIFT 6
+#define TOP_MUX_DPSW_SHIFT 7
+#define TOP_MUX_DPSW_CENTRAL_SHIFT 8
+#define TOP_MUX_SPMI_M_MST_SHIFT 9
+#define TOP_MUX_DVFSRC_SHIFT 10
+#define TOP_MUX_PWM_VLP_SHIFT 11
+#define TOP_MUX_AXI_VLP_SHIFT 12
+#define TOP_MUX_SYSTIMER_26M_SHIFT 13
+#define TOP_MUX_SSPM_SHIFT 14
+#define TOP_MUX_SRCK_SHIFT 15
+#define TOP_MUX_CAMTG0_SHIFT 16
+#define TOP_MUX_CAMTG1_SHIFT 17
+#define TOP_MUX_CAMTG2_SHIFT 18
+#define TOP_MUX_CAMTG3_SHIFT 19
+#define TOP_MUX_CAMTG4_SHIFT 20
+#define TOP_MUX_CAMTG5_SHIFT 21
+#define TOP_MUX_CAMTG6_SHIFT 22
+#define TOP_MUX_CAMTG7_SHIFT 23
+#define TOP_MUX_SSPM_26M_SHIFT 25
+#define TOP_MUX_ULPOSC_SSPM_SHIFT 26
+#define TOP_MUX_VLP_PBUS_26M_SHIFT 27
+#define TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT 28
+#define TOP_MUX_DPMSRDMA_SHIFT 29
+#define TOP_MUX_VLP_PBUS_156M_SHIFT 30
+#define TOP_MUX_SPM_SHIFT 0
+#define TOP_MUX_MMINFRA_VLP_SHIFT 1
+#define TOP_MUX_USB_TOP_SHIFT 2
+#define TOP_MUX_SSUSB_XHCI_SHIFT 3
+#define TOP_MUX_NOC_VLP_SHIFT 4
+#define TOP_MUX_AUDIO_H_SHIFT 5
+#define TOP_MUX_AUD_ENGEN1_SHIFT 6
+#define TOP_MUX_AUD_ENGEN2_SHIFT 7
+#define TOP_MUX_AUD_INTBUS_SHIFT 8
+#define TOP_MUX_SPU_VLP_26M_SHIFT 9
+#define TOP_MUX_SPU0_VLP_SHIFT 10
+#define TOP_MUX_SPU1_VLP_SHIFT 11
+
+/* CKSTA REG */
+#define VLP_CKSTA_REG0 0x0250
+#define VLP_CKSTA_REG1 0x0254
+
+/* HW Voter REG */
+#define HWV_CG_9_SET 0x0048
+#define HWV_CG_9_CLR 0x004c
+#define HWV_CG_9_DONE 0x2c24
+#define HWV_CG_10_SET 0x0050
+#define HWV_CG_10_CLR 0x0054
+#define HWV_CG_10_DONE 0x2c28
+
+/* PLL REG */
+#define VLP_AP_PLL_CON3 0x264
+#define VLP_APLL1_TUNER_CON0 0x2a4
+#define VLP_APLL2_TUNER_CON0 0x2a8
+#define VLP_APLL1_CON0 0x274
+#define VLP_APLL1_CON1 0x278
+#define VLP_APLL1_CON2 0x27c
+#define VLP_APLL1_CON3 0x280
+#define VLP_APLL2_CON0 0x28c
+#define VLP_APLL2_CON1 0x290
+#define VLP_APLL2_CON2 0x294
+#define VLP_APLL2_CON3 0x298
+
+/* VLP APLL1 tuner default value */
+#define VLP_APLL1_TUNER_CON0_VALUE 0x6f28bd4d
+/* VLP APLL2 tuner default value + 1 */
+#define VLP_APLL2_TUNER_CON0_VALUE 0x78fd5265
+
+#define VLP_PLLEN_ALL 0x080
+#define VLP_PLLEN_ALL_SET 0x084
+#define VLP_PLLEN_ALL_CLR 0x088
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = VLP_PLLEN_ALL, \
+ .en_set_reg = VLP_PLLEN_ALL_SET, \
+ .en_clr_reg = VLP_PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+static DEFINE_SPINLOCK(mt8196_clk_vlp_lock);
+
+static const struct mtk_fixed_factor vlp_divs[] = {
+ FACTOR(CLK_VLP_CLK26M, "vlp_clk26m", "clk26m", 1, 1),
+ FACTOR(CLK_VLP_APLL1_D4, "apll1_d4", "vlp_apll1", 1, 4),
+ FACTOR(CLK_VLP_APLL1_D8, "apll1_d8", "vlp_apll1", 1, 8),
+ FACTOR(CLK_VLP_APLL2_D4, "apll2_d4", "vlp_apll2", 1, 4),
+ FACTOR(CLK_VLP_APLL2_D8, "apll2_d8", "vlp_apll2", 1, 8),
+};
+
+static const char * const vlp_scp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d6",
+ "mainpll_d4",
+ "mainpll_d3",
+ "vlp_apll1"
+};
+
+static const char * const vlp_scp_spi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const vlp_scp_iic_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_scp_iic_hs_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_pwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_spmi_32k_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_apxgpt_26m_b_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpsw_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_dpsw_central_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_spmi_m_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_dvfsrc_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_pwm_vlp_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d8",
+ "mainpll_d4_d8"
+};
+
+static const char * const vlp_axi_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4",
+ "osc_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_systimer_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_sspm_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d2",
+ "osc_d2",
+ "mainpll_d6"
+};
+
+static const char * const vlp_srck_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_camtg0_1_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "ulposc3",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_camtg2_7_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_sspm_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_ulposc_sspm_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const vlp_vlp_pbus_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_debug_err_flag_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpmsrdma_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_vlp_pbus_156m_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_spm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_mminfra_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d3"
+};
+
+static const char * const vlp_usb_parents[] = {
+ "clk26m",
+ "mainpll_d9"
+};
+
+static const char * const vlp_noc_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d9"
+};
+
+static const char * const vlp_audio_h_parents[] = {
+ "vlp_clk26m",
+ "vlp_apll1",
+ "vlp_apll2"
+};
+
+static const char * const vlp_aud_engen1_parents[] = {
+ "vlp_clk26m",
+ "apll1_d8",
+ "apll1_d4"
+};
+
+static const char * const vlp_aud_engen2_parents[] = {
+ "vlp_clk26m",
+ "apll2_d8",
+ "apll2_d4"
+};
+
+static const char * const vlp_aud_intbus_parents[] = {
+ "vlp_clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4"
+};
+
+static const u8 vlp_aud_parent_index[] = { 1, 2, 3 };
+
+static const char * const vlp_spvlp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_spu0_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const vlp_spu1_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const struct mtk_mux vlp_muxes[] = {
+ /* VLP_CLK_CFG_0 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_SCP, "vlp_scp", vlp_scp_parents,
+ VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET, VLP_CLK_CFG_0_CLR,
+ 0, 3, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 31),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_SPI, "vlp_scp_spi",
+ vlp_scp_spi_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SPI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC, "vlp_scp_iic",
+ vlp_scp_iic_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC_HS, "vlp_scp_iic_hs",
+ vlp_scp_iic_hs_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 24, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_HS_SHIFT),
+ /* VLP_CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_VLP_PWRAP_ULPOSC, "vlp_pwrap_ulposc",
+ vlp_pwrap_ulposc_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_PWRAP_ULPOSC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_TIA_32K, "vlp_spmi_32k",
+ vlp_spmi_32k_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_TIA_32K_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_APXGPT_26M_B, "vlp_apxgpt_26m_b",
+ vlp_apxgpt_26m_b_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_APXGPT_26M_B_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW, "vlp_dpsw",
+ vlp_dpsw_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_SHIFT),
+ /* VLP_CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW_CENTRAL, "vlp_dpsw_central",
+ vlp_dpsw_central_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_CENTRAL_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_MST, "vlp_spmi_m",
+ vlp_spmi_m_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_MST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DVFSRC, "vlp_dvfsrc",
+ vlp_dvfsrc_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DVFSRC_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_PWM_VLP, "vlp_pwm_vlp", vlp_pwm_vlp_parents,
+ VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET, VLP_CLK_CFG_2_CLR,
+ 24, 3, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_PWM_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 20),
+ /* VLP_CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_VLP_AXI_VLP, "vlp_axi_vlp",
+ vlp_axi_vlp_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 0, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_AXI_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SYSTIMER_26M, "vlp_systimer_26m",
+ vlp_systimer_26m_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SYSTIMER_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM, "vlp_sspm",
+ vlp_sspm_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SRCK, "vlp_srck",
+ vlp_srck_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SRCK_SHIFT),
+ /* VLP_CLK_CFG_4 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG0, "vlp_camtg0", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG0_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 15),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG1, "vlp_camtg1", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 14),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG2, "vlp_camtg2", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG3, "vlp_camtg3", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG3_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 12),
+ /* VLP_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG4, "vlp_camtg4", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG4_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG5, "vlp_camtg5", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG5_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG6, "vlp_camtg6", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG6_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 9),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG7, "vlp_camtg7", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG7_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 8),
+ /* VLP_CLK_CFG_6 */
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM_26M, "vlp_sspm_26m",
+ vlp_sspm_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_ULPOSC_SSPM, "vlp_ulposc_sspm",
+ vlp_ulposc_sspm_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_ULPOSC_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_26M, "vlp_vlp_pbus_26m",
+ vlp_vlp_pbus_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_26M_SHIFT),
+ /* VLP_CLK_CFG_7 */
+ MUX_CLR_SET_UPD(CLK_VLP_DEBUG_ERR_FLAG, "vlp_debug_err_flag",
+ vlp_debug_err_flag_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPMSRDMA, "vlp_dpmsrdma",
+ vlp_dpmsrdma_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPMSRDMA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_156M, "vlp_vlp_pbus_156m",
+ vlp_vlp_pbus_156m_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_156M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPM, "vlp_spm",
+ vlp_spm_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPM_SHIFT),
+ /* VLP_CLK_CFG_8 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_MMINFRA, "vlp_mminfra", vlp_mminfra_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_MMINFRA_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 31),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_TOP, "vlp_usb", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 8, 1, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 30),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_XHCI, "vlp_usb_xhci", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 16, 1, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 29),
+ MUX_CLR_SET_UPD(CLK_VLP_NOC_VLP, "vlp_noc_vlp",
+ vlp_noc_vlp_parents, VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET,
+ VLP_CLK_CFG_8_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_NOC_VLP_SHIFT),
+ /* VLP_CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUDIO_H, "vlp_audio_h",
+ vlp_audio_h_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUDIO_H_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 27),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN1, "vlp_aud_engen1",
+ vlp_aud_engen1_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 8, 2, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 26),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN2, "vlp_aud_engen2",
+ vlp_aud_engen2_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 16, 2, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_INTBUS, "vlp_aud_intbus",
+ vlp_aud_intbus_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 24, 2, 31, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_INTBUS_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 24),
+ /* VLP_CLK_CFG_10 */
+ MUX_CLR_SET_UPD(CLK_VLP_SPVLP_26M, "vlp_spvlp_26m",
+ vlp_spvlp_26m_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU0_VLP, "vlp_spu0_vlp",
+ vlp_spu0_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU0_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU1_VLP, "vlp_spu1_vlp",
+ vlp_spu1_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU1_VLP_SHIFT),
+};
+
+static const struct mtk_pll_data vlp_plls[] = {
+ PLL_FENC(CLK_VLP_APLL1, "vlp_apll1", VLP_APLL1_CON0, 0x0358, 1, 0,
+ VLP_APLL1_CON1, 24, VLP_APLL1_CON2, 0, 32, 0),
+ PLL_FENC(CLK_VLP_APLL2, "vlp_apll2", VLP_APLL2_CON0, 0x0358, 0, 0,
+ VLP_APLL2_CON1, 24, VLP_APLL2_CON2, 0, 32, 1),
+};
+
+static const struct regmap_config vlpckgen_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int clk_mt8196_vlp_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &vlpckgen_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(vlp_muxes) +
+ ARRAY_SIZE(vlp_plls) +
+ ARRAY_SIZE(vlp_divs));
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = mtk_clk_register_muxes(&pdev->dev, vlp_muxes, ARRAY_SIZE(vlp_muxes),
+ node, &mt8196_clk_vlp_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_plls(node, vlp_plls, ARRAY_SIZE(vlp_plls),
+ clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ /* Initialize APLL tuner registers */
+ regmap_write(regmap, VLP_APLL1_TUNER_CON0, VLP_APLL1_TUNER_CON0_VALUE);
+ regmap_write(regmap, VLP_APLL2_TUNER_CON0, VLP_APLL2_TUNER_CON0_VALUE);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_vlp_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_vlp_ck[] = {
+ { .compatible = "mediatek,mt8196-vlpckgen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vlp_ck);
+
+static struct platform_driver clk_mt8196_vlp_drv = {
+ .probe = clk_mt8196_vlp_probe,
+ .remove = clk_mt8196_vlp_remove,
+ .driver = {
+ .name = "clk-mt8196-vlpck",
+ .of_match_table = of_match_clk_mt8196_vlp_ck,
+ },
+};
+
+module_platform_driver(clk_mt8196_vlp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 VLP clock generator driver");
+MODULE_LICENSE("GPL");
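A consumer-side usage sketch follows; the device-tree wiring (clocks = <&vlpckgen CLK_VLP_USB_TOP>) and the probe function name are assumptions, not part of this patch. On clk_prepare_enable(), the FENC-gated muxes registered above poll their VLP_OCIC_FENC_STATUS_MON_* bit before returning:

/*
 * Hypothetical consumer sketch; assumes a DT node referencing the
 * vlpckgen provider with one of the dt-binding indices above.
 */
static int example_usb_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* enable path polls the FENC status bit */
	if (ret)
		return ret;

	return 0;
}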
diff --git a/drivers/clk/mediatek/clk-mt8365-apmixedsys.c b/drivers/clk/mediatek/clk-mt8365-apmixedsys.c
index 9b0bc5daeac0..f41b991a0178 100644
--- a/drivers/clk/mediatek/clk-mt8365-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8365-apmixedsys.c
@@ -163,4 +163,6 @@ static struct platform_driver clk_mt8365_apmixed_drv = {
},
};
builtin_platform_driver(clk_mt8365_apmixed_drv)
+
+MODULE_DESCRIPTION("MediaTek MT8365 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c
index 4f10ce1531d2..2583c4704ffa 100644
--- a/drivers/clk/mediatek/clk-mt8365-apu.c
+++ b/drivers/clk/mediatek/clk-mt8365-apu.c
@@ -46,11 +46,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_apu);
static struct platform_driver clk_mt8365_apu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-apu",
.of_match_table = of_match_clk_mt8365_apu,
},
};
module_platform_driver(clk_mt8365_apu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 AI Processing Unit clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c
index fe428a4f1d37..89d2bd50263b 100644
--- a/drivers/clk/mediatek/clk-mt8365-cam.c
+++ b/drivers/clk/mediatek/clk-mt8365-cam.c
@@ -48,11 +48,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_cam);
static struct platform_driver clk_mt8365_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-cam",
.of_match_table = of_match_clk_mt8365_cam,
},
};
module_platform_driver(clk_mt8365_cam_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 Camera clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c
index 4a590284f7e2..41bcd389119c 100644
--- a/drivers/clk/mediatek/clk-mt8365-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8365-mfg.c
@@ -54,11 +54,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_mfg);
static struct platform_driver clk_mt8365_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-mfg",
.of_match_table = of_match_clk_mt8365_mfg,
},
};
module_platform_driver(clk_mt8365_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 GPU mfg clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 3f62ec750733..56fb2a43ecd0 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -85,11 +85,13 @@ MODULE_DEVICE_TABLE(platform, clk_mt8365_mm_id_table);
static struct platform_driver clk_mt8365_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8365-mm",
},
.id_table = clk_mt8365_mm_id_table,
};
module_platform_driver(clk_mt8365_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 MultiMedia clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c
index 233924837c3b..f5d0518bc2e0 100644
--- a/drivers/clk/mediatek/clk-mt8365-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8365-vdec.c
@@ -54,11 +54,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_vdec);
static struct platform_driver clk_mt8365_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-vdec",
.of_match_table = of_match_clk_mt8365_vdec,
},
};
module_platform_driver(clk_mt8365_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 Video Decoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c
index cc063f18e56b..35abd908537c 100644
--- a/drivers/clk/mediatek/clk-mt8365-venc.c
+++ b/drivers/clk/mediatek/clk-mt8365-venc.c
@@ -43,11 +43,13 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_venc);
static struct platform_driver clk_mt8365_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-venc",
.of_match_table = of_match_clk_mt8365_venc,
},
};
module_platform_driver(clk_mt8365_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 Video Encoders clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
index ac45e4cc9bcd..e7952121112e 100644
--- a/drivers/clk/mediatek/clk-mt8365.c
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -809,7 +809,9 @@ static struct platform_driver clk_mt8365_drv = {
.of_match_table = of_match_clk_mt8365,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8365_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8365 main clocks driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index 53e1866fb8e2..6227635fd5a1 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516_aud);
static struct platform_driver clk_mt8516_aud_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8516-aud",
.of_match_table = of_match_clk_mt8516_aud,
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index b8ae837c59dc..21eb052b0a53 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516);
static struct platform_driver clk_mt8516_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8516",
.of_match_table = of_match_clk_mt8516,
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index bd37ab4d1a9b..19cd27941747 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
}
- devm_pm_runtime_enable(&pdev->dev);
- /*
- * Do a pm_runtime_resume_and_get() to workaround a possible
- * deadlock between clk_register() and the genpd framework.
- */
- r = pm_runtime_resume_and_get(&pdev->dev);
- if (r)
- return r;
+ if (mcd->need_runtime_pm) {
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+ * Do a pm_runtime_resume_and_get() to workaround a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+ }
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
@@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
- pm_runtime_put(&pdev->dev);
+ if (mcd->need_runtime_pm)
+ pm_runtime_put(&pdev->dev);
return r;
@@ -618,7 +621,8 @@ free_base:
if (mcd->shared_io && base)
iounmap(base);
- pm_runtime_put(&pdev->dev);
+ if (mcd->need_runtime_pm)
+ pm_runtime_put(&pdev->dev);
return r;
}
@@ -681,4 +685,20 @@ void mtk_clk_simple_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node)
+{
+ struct device_node *hwv_node;
+ struct regmap *regmap_hwv;
+
+ hwv_node = of_parse_phandle(node, "mediatek,hardware-voter", 0);
+ if (!hwv_node)
+ return NULL;
+
+ regmap_hwv = device_node_to_regmap(hwv_node);
+ of_node_put(hwv_node);
+
+ return regmap_hwv;
+}
+EXPORT_SYMBOL_GPL(mtk_clk_get_hwv_regmap);
+
MODULE_LICENSE("GPL");
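mtk_clk_get_hwv_regmap() distinguishes three outcomes: NULL when the node carries no "mediatek,hardware-voter" phandle (no voter, not an error), an ERR_PTR when the regmap lookup fails, and a valid regmap otherwise. A caller sketch, mirroring the mux registration later in this series; "dev" and "node" are assumed to come from the probing driver:

/* Caller sketch (mirrors mtk_clk_register_muxes() below) */
struct regmap *regmap_hwv = mtk_clk_get_hwv_regmap(node);

if (IS_ERR(regmap_hwv))
	return dev_err_probe(dev, PTR_ERR(regmap_hwv),
			     "Cannot find hardware voter regmap for %pOF\n",
			     node);
/* regmap_hwv == NULL is valid: the clocks simply have no HW voter */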
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 22096501a60a..5417b9264e6d 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -20,6 +20,8 @@
#define MHZ (1000 * 1000)
+#define MTK_WAIT_HWV_DONE_US 30
+
struct platform_device;
/*
@@ -173,6 +175,25 @@ struct mtk_composite {
.flags = 0, \
}
+#define MUX_DIV_GATE(_id, _name, _parents, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _div_reg, _div_shift, _div_width, \
+ _gate_reg, _gate_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .mux_width = _mux_width, \
+ .divider_reg = _div_reg, \
+ .divider_shift = _div_shift, \
+ .divider_width = _div_width, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }
+
int mtk_clk_register_composites(struct device *dev,
const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
@@ -237,11 +258,14 @@ struct mtk_clk_desc {
int (*clk_notifier_func)(struct device *dev, struct clk *clk);
unsigned int mfg_clk_idx;
+
+ bool need_runtime_pm;
};
int mtk_clk_pdev_probe(struct platform_device *pdev);
void mtk_clk_pdev_remove(struct platform_device *pdev);
int mtk_clk_simple_probe(struct platform_device *pdev);
void mtk_clk_simple_remove(struct platform_device *pdev);
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node);
#endif /* __DRV_CLK_MTK_H */
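MUX_DIV_GATE() gains no user in this hunk; a purely hypothetical instantiation (clock id, parent list, and register offsets are all invented for illustration) would read:

/* Hypothetical example only: the id and all offsets are invented */
static const char * const example_aud_parents[] = { "clk26m", "mainpll_d4" };

static const struct mtk_composite example_composites[] = {
	MUX_DIV_GATE(CLK_EXAMPLE_AUD, "example_aud", example_aud_parents,
		     0x0040, 8, 2,	/* mux reg, shift, width */
		     0x0044, 0, 3,	/* divider reg, shift, width */
		     0x0048, 15),	/* gate reg, shift */
};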
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 60990296450b..c5af6dc078a3 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,11 +16,15 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "clk-mtk.h"
#include "clk-mux.h"
+#define MTK_WAIT_FENC_DONE_US 30
+
struct mtk_clk_mux {
struct clk_hw hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
const struct mtk_mux *data;
spinlock_t *lock;
bool reparent;
@@ -30,6 +35,33 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
+static int mtk_clk_mux_fenc_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return ret;
+}
+
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -70,6 +102,16 @@ static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
BIT(mux->data->gate_shift));
}
+static int mtk_clk_mux_fenc_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->fenc_sta_mon_ofs, &val);
+
+ return !!(val & BIT(mux->data->fenc_shift));
+}
+
static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -80,6 +122,41 @@ static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
return (val & BIT(mux->data->gate_shift)) == 0;
}
+static int mtk_clk_mux_hwv_fenc_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+ int ret;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_set_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, val & BIT(mux->data->gate_shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ return ret;
+}
+
+static void mtk_clk_mux_hwv_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, (val & BIT(mux->data->gate_shift)),
+ 0, MTK_WAIT_HWV_DONE_US);
+}
+
static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -146,9 +223,15 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static bool mtk_clk_mux_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_mux_gate_hwv_fenc_clr_set_upd_ops)
+ return true;
- return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+ return false;
}
const struct clk_ops mtk_mux_clr_set_upd_ops = {
@@ -168,9 +251,30 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
+const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_fenc_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_fenc_clr_set_upd_ops);
+
+const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_hwv_fenc_enable,
+ .disable = mtk_clk_mux_hwv_disable,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_hwv_fenc_clr_set_upd_ops);
+
static struct clk_hw *mtk_clk_register_mux(struct device *dev,
const struct mtk_mux *mux,
struct regmap *regmap,
+ struct regmap *regmap_hwv,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
@@ -186,8 +290,13 @@ static struct clk_hw *mtk_clk_register_mux(struct device *dev,
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
init.ops = mux->ops;
+ if (mtk_clk_mux_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
clk_mux->regmap = regmap;
+ clk_mux->regmap_hwv = regmap_hwv;
clk_mux->data = mux;
clk_mux->lock = lock;
clk_mux->hw.init = &init;
@@ -220,6 +329,7 @@ int mtk_clk_register_muxes(struct device *dev,
struct clk_hw_onecell_data *clk_data)
{
struct regmap *regmap;
+ struct regmap *regmap_hwv;
struct clk_hw *hw;
int i;
@@ -229,6 +339,12 @@ int mtk_clk_register_muxes(struct device *dev,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_mux *mux = &muxes[i];
@@ -238,7 +354,7 @@ int mtk_clk_register_muxes(struct device *dev,
continue;
}
- hw = mtk_clk_register_mux(dev, mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, regmap_hwv, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 943ad1d7ce4b..151e56dcf884 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -29,10 +29,16 @@ struct mtk_mux {
u32 clr_ofs;
u32 upd_ofs;
+ u32 hwv_set_ofs;
+ u32 hwv_clr_ofs;
+ u32 hwv_sta_ofs;
+ u32 fenc_sta_mon_ofs;
+
u8 mux_shift;
u8 mux_width;
u8 gate_shift;
s8 upd_shift;
+ u8 fenc_shift;
const struct clk_ops *ops;
signed char num_parents;
@@ -77,6 +83,8 @@ struct mtk_mux {
extern const struct clk_ops mtk_mux_clr_set_upd_ops;
extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops;
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
@@ -118,6 +126,85 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .hwv_sta_ofs = _hwv_sta_ofs, \
+ .hwv_set_ofs = _hwv_set_ofs, \
+ .hwv_clr_ofs = _hwv_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_hwv_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ _num_parents, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .parent_index = _paridx, \
+ .num_parents = _num_parents, \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ NULL, ARRAY_SIZE(_parents), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ ARRAY_SIZE(_paridx), _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
int mtk_clk_register_muxes(struct device *dev,
const struct mtk_mux *muxes,
int num, struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index ce453e1718e5..cd2b6ce551c6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -37,6 +37,13 @@ int mtk_pll_is_prepared(struct clk_hw *hw)
return (readl(pll->en_addr) & BIT(pll->data->pll_en_bit)) != 0;
}
+static int mtk_pll_fenc_is_prepared(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ return !!(readl(pll->fenc_addr) & BIT(pll->data->fenc_sta_bit));
+}
+
static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
@@ -200,16 +207,19 @@ unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv);
}
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 pcw = 0;
int postdiv;
- mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate);
+ mtk_pll_calc_values(pll, &pcw, &postdiv, req->rate,
+ req->best_parent_rate);
+
+ req->rate = __mtk_pll_recalc_rate(pll, req->best_parent_rate, pcw,
+ postdiv);
- return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv);
+ return 0;
}
int mtk_pll_prepare(struct clk_hw *hw)
@@ -274,14 +284,43 @@ void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->pwr_addr);
}
+static int mtk_pll_prepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_set_addr);
+
+ /* Wait 20us after enable for the PLL to stabilize */
+ udelay(20);
+
+ return 0;
+}
+
+static void mtk_pll_unprepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_clr_addr);
+}
+
const struct clk_ops mtk_pll_ops = {
.is_prepared = mtk_pll_is_prepared,
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
+ .set_rate = mtk_pll_set_rate,
+};
+
+const struct clk_ops mtk_pll_fenc_clr_set_ops = {
+ .is_prepared = mtk_pll_fenc_is_prepared,
+ .prepare = mtk_pll_prepare_setclr,
+ .unprepare = mtk_pll_unprepare_setclr,
+ .recalc_rate = mtk_pll_recalc_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_pll_set_rate,
};
+EXPORT_SYMBOL_GPL(mtk_pll_fenc_clr_set_ops);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
@@ -308,9 +347,15 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
pll->en_addr = base + data->en_reg;
else
pll->en_addr = pll->base_addr + REG_CON0;
+ if (data->en_set_reg)
+ pll->en_set_addr = base + data->en_set_reg;
+ if (data->en_clr_reg)
+ pll->en_clr_addr = base + data->en_clr_reg;
pll->hw.init = &init;
pll->data = data;
+ pll->fenc_addr = base + data->fenc_sta_ofs;
+
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = pll_ops;
@@ -333,12 +378,13 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
{
struct mtk_clk_pll *pll;
struct clk_hw *hw;
+ const struct clk_ops *pll_ops = data->ops ? data->ops : &mtk_pll_ops;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
- hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
+ hw = mtk_clk_register_pll_ops(pll, data, base, pll_ops);
if (IS_ERR(hw))
kfree(pll);
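Tying the fenc fields back to the VLP PLL table earlier in this series: vlp_apll1 passes fenc_sta_ofs 0x0358 and fenc_sta_bit 1 through PLL_FENC(), so the new is_prepared hook reduces to a single status-bit read. A sketch, with the ioremapped base pointer assumed:

/* Equivalent of mtk_pll_fenc_is_prepared() for the VLP APLLs (sketch) */
bool apll1_prepared = !!(readl(base + 0x0358) & BIT(1));
bool apll2_prepared = !!(readl(base + 0x0358) & BIT(0));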
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index 285c8db958b3..d71c150ce83e 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -29,6 +29,7 @@ struct mtk_pll_data {
u32 reg;
u32 pwr_reg;
u32 en_mask;
+ u32 fenc_sta_ofs;
u32 pd_reg;
u32 tuner_reg;
u32 tuner_en_reg;
@@ -47,8 +48,11 @@ struct mtk_pll_data {
const struct mtk_pll_div_table *div_table;
const char *parent_name;
u32 en_reg;
+ u32 en_set_reg;
+ u32 en_clr_reg;
u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
u8 pcw_chg_bit;
+ u8 fenc_sta_bit;
};
/*
@@ -68,6 +72,9 @@ struct mtk_clk_pll {
void __iomem *pcw_addr;
void __iomem *pcw_chg_addr;
void __iomem *en_addr;
+ void __iomem *en_set_addr;
+ void __iomem *en_clr_addr;
+ void __iomem *fenc_addr;
const struct mtk_pll_data *data;
};
@@ -78,6 +85,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
struct clk_hw_onecell_data *clk_data);
extern const struct clk_ops mtk_pll_ops;
+extern const struct clk_ops mtk_pll_fenc_clr_set_ops;
static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw)
{
@@ -96,8 +104,7 @@ void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin);
int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 094ec8a26d66..83630ee07ee9 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -42,7 +42,7 @@ static const struct clk_ops mtk_pllfh_ops = {
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_fhctl_set_rate,
};
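The .round_rate to .determine_rate conversion also changes the op's contract: instead of returning the rounded rate, the hook rewrites req->rate in place and returns 0 on success. A minimal equivalence sketch, with "hw" assumed to be a registered MediaTek PLL:

/*
 * Sketch: new-style call; struct clk_rate_request comes from
 * <linux/clk-provider.h>.
 */
struct clk_rate_request req = {
	.rate = 1200000000,		/* requested rate */
	.best_parent_rate = 26000000,	/* 26 MHz crystal */
};
int ret = mtk_pll_determine_rate(hw, &req);
/* on success, req.rate holds what mtk_pll_round_rate() used to return */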
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 290ceda84ce4..2e3303975096 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -110,65 +110,6 @@ static int reset_xlate(struct reset_controller_dev *rcdev,
return data->desc->rst_idx_map[reset_spec->args[0]];
}
-int mtk_register_reset_controller(struct device_node *np,
- const struct mtk_clk_rst_desc *desc)
-{
- struct regmap *regmap;
- const struct reset_control_ops *rcops = NULL;
- struct mtk_clk_rst_data *data;
- int ret;
-
- if (!desc) {
- pr_err("mtk clock reset desc is NULL\n");
- return -EINVAL;
- }
-
- switch (desc->version) {
- case MTK_RST_SIMPLE:
- rcops = &mtk_reset_ops;
- break;
- case MTK_RST_SET_CLR:
- rcops = &mtk_reset_ops_set_clr;
- break;
- default:
- pr_err("Unknown reset version %d\n", desc->version);
- return -EINVAL;
- }
-
- regmap = device_node_to_regmap(np);
- if (IS_ERR(regmap)) {
- pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->desc = desc;
- data->regmap = regmap;
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.ops = rcops;
- data->rcdev.of_node = np;
-
- if (data->desc->rst_idx_map_nr > 0) {
- data->rcdev.of_reset_n_cells = 1;
- data->rcdev.nr_resets = desc->rst_idx_map_nr;
- data->rcdev.of_xlate = reset_xlate;
- } else {
- data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
- }
-
- ret = reset_controller_register(&data->rcdev);
- if (ret) {
- pr_err("could not register reset controller: %d\n", ret);
- kfree(data);
- return ret;